diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 54d2cb92aca..97356a234f3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -1716,10 +1716,15 @@ public interface Admin extends Abortable, Closeable { * * @param enabled enabled or not * @param synchronous If true, it waits until current split() call, if outstanding, to return. + * @param skipLock if false, a lock is acquired before the switch is changed. + * While the lock is held, other requests to change the switch are rejected. + * When you pass false, you must later call + * {@link #releaseSplitOrMergeLockAndRollback()} yourself to release the lock. * @param switchTypes switchType list {@link MasterSwitchType} * @return Previous switch value array */ boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean synchronous, + final boolean skipLock, final MasterSwitchType... switchTypes) throws IOException; /** @@ -1729,6 +1734,14 @@ public interface Admin extends Abortable, Closeable { */ boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws IOException; + /** + * Call this method after calling + * {@link #setSplitOrMergeEnabled(boolean, boolean, boolean, MasterSwitchType...)} + * with skipLock set to false. It releases the lock created by that call and + * rolls the switch state back to the value it had before the switch was changed. + */ + void releaseSplitOrMergeLockAndRollback() throws IOException; + /** * Currently, there are only two compact types: * {@code NORMAL} means do store files compaction; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java index e43a712b70f..21e7e511661 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java @@ -1750,6 +1750,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable { return stub.isSplitOrMergeEnabled(controller, request); } + @Override + public MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse + releaseSplitOrMergeLockAndRollback(RpcController controller, + MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request) throws ServiceException { + return stub.releaseSplitOrMergeLockAndRollback(controller, request); + } + @Override public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController controller, IsNormalizerEnabledRequest request) throws ServiceException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 954196751e6..83f8bc1272e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -3570,13 +3570,13 @@ public class HBaseAdmin implements Admin { @Override public boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean synchronous, - final MasterSwitchType... switchTypes) - throws IOException { + final boolean skipLock, final MasterSwitchType...
switchTypes) throws IOException { return executeCallable(new MasterCallable(getConnection()) { @Override public boolean[] call(int callTimeout) throws ServiceException { MasterProtos.SetSplitOrMergeEnabledResponse response = master.setSplitOrMergeEnabled(null, - RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous, switchTypes)); + RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous, + skipLock, switchTypes)); boolean[] result = new boolean[switchTypes.length]; int i = 0; for (Boolean prevValue : response.getPrevValueList()) { @@ -3598,6 +3598,18 @@ public class HBaseAdmin implements Admin { }); } + @Override + public void releaseSplitOrMergeLockAndRollback() throws IOException { + executeCallable(new MasterCallable(getConnection()) { + @Override + public Void call(int callTimeout) throws ServiceException { + master.releaseSplitOrMergeLockAndRollback(null, + RequestConverter.buildReleaseSplitOrMergeLockAndRollbackRequest()); + return null; + } + }); + } + private HRegionInfo getMobRegionInfo(TableName tableName) { return new HRegionInfo(tableName, Bytes.toBytes(".mob"), HConstants.EMPTY_END_ROW, false, 0); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index 45d15a3938f..750232343b3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -104,6 +104,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest; @@ -1710,6 +1711,13 @@ public final class RequestConverter { return builder.build(); } + public static ReleaseSplitOrMergeLockAndRollbackRequest + buildReleaseSplitOrMergeLockAndRollbackRequest() { + ReleaseSplitOrMergeLockAndRollbackRequest.Builder builder = + ReleaseSplitOrMergeLockAndRollbackRequest.newBuilder(); + return builder.build(); + } + /** * Creates a protocol buffer SetSplitOrMergeEnabledRequest * * @param enabled * @param synchronous + * @param skipLock if false, take the switch lock before changing the switch * @param switchTypes * @return a SetSplitOrMergeEnabledRequest */ public static SetSplitOrMergeEnabledRequest buildSetSplitOrMergeEnabledRequest(boolean enabled, - boolean synchronous, Admin.MasterSwitchType... switchTypes) { + boolean synchronous, boolean skipLock, Admin.MasterSwitchType...
switchTypes) { SetSplitOrMergeEnabledRequest.Builder builder = SetSplitOrMergeEnabledRequest.newBuilder(); builder.setEnabled(enabled); builder.setSynchronous(synchronous); + builder.setSkipLock(skipLock); for (Admin.MasterSwitchType switchType : switchTypes) { builder.addSwitchTypes(convert(switchType)); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java index b6653538f3e..205d3970fb8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java @@ -117,6 +117,8 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { private String regionNormalizerZNode; // znode containing the state of all switches, currently there are split and merge child node. private String switchZNode; + // znode containing the lock for the switches + private String switchLockZNode; // znode containing the lock for the tables public String tableLockZNode; // znode containing the state of recovering regions @@ -433,6 +435,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { regionNormalizerZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.regionNormalizer", "normalizer")); switchZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.switch", "switch")); + switchLockZNode = ZKUtil.joinZNode(switchZNode, "locks"); tableLockZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.tableLock", "table-lock")); recoveringRegionsZNode = ZKUtil.joinZNode(baseZNode, @@ -799,4 +802,11 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { public String getSwitchZNode() { return switchZNode; } + + /** + * @return the znode used for the switch lock.
+ * */ + public String getSwitchLockZNode() { + return switchLockZNode; + } } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index b91a36b3b25..b4bd7afaa57 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -28882,6 +28882,16 @@ public final class MasterProtos { * repeated .hbase.pb.MasterSwitchType switch_types = 3; */ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterSwitchType getSwitchTypes(int index); + + // optional bool skip_lock = 4; + /** + * optional bool skip_lock = 4; + */ + boolean hasSkipLock(); + /** + * optional bool skip_lock = 4; + */ + boolean getSkipLock(); } /** * Protobuf type {@code hbase.pb.SetSplitOrMergeEnabledRequest} @@ -28977,6 +28987,11 @@ public final class MasterProtos { input.popLimit(oldLimit); break; } + case 32: { + bitField0_ |= 0x00000004; + skipLock_ = input.readBool(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -29074,10 +29089,27 @@ public final class MasterProtos { return switchTypes_.get(index); } + // optional bool skip_lock = 4; + public static final int SKIP_LOCK_FIELD_NUMBER = 4; + private boolean skipLock_; + /** + * optional bool skip_lock = 4; + */ + public boolean hasSkipLock() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional bool skip_lock = 4; + */ + public boolean getSkipLock() { + return skipLock_; + } + private void initFields() { enabled_ = false; synchronous_ = false; switchTypes_ = java.util.Collections.emptyList(); + skipLock_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -29104,6 +29136,9 @@ public final class MasterProtos { for (int i = 0; i < switchTypes_.size(); i++) { output.writeEnum(3, switchTypes_.get(i).getNumber()); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBool(4, skipLock_); + } getUnknownFields().writeTo(output); } @@ -29130,6 +29165,10 @@ public final class MasterProtos { size += dataSize; size += 1 * switchTypes_.size(); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(4, skipLock_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -29165,6 +29204,11 @@ public final class MasterProtos { } result = result && getSwitchTypesList() .equals(other.getSwitchTypesList()); + result = result && (hasSkipLock() == other.hasSkipLock()); + if (hasSkipLock()) { + result = result && (getSkipLock() + == other.getSkipLock()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -29190,6 +29234,10 @@ public final class MasterProtos { hash = (37 * hash) + SWITCH_TYPES_FIELD_NUMBER; hash = (53 * hash) + hashEnumList(getSwitchTypesList()); } + if (hasSkipLock()) { + hash = (37 * hash) + SKIP_LOCK_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getSkipLock()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -29305,6 +29353,8 @@ public final class MasterProtos { bitField0_ = (bitField0_ & ~0x00000002); switchTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000004); + skipLock_ = false; + bitField0_ = (bitField0_ & ~0x00000008); return this; } @@ -29346,6 
+29396,10 @@ public final class MasterProtos { bitField0_ = (bitField0_ & ~0x00000004); } result.switchTypes_ = switchTypes_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000004; + } + result.skipLock_ = skipLock_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -29378,6 +29432,9 @@ public final class MasterProtos { } onChanged(); } + if (other.hasSkipLock()) { + setSkipLock(other.getSkipLock()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -29547,6 +29604,39 @@ public final class MasterProtos { return this; } + // optional bool skip_lock = 4; + private boolean skipLock_ ; + /** + * optional bool skip_lock = 4; + */ + public boolean hasSkipLock() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bool skip_lock = 4; + */ + public boolean getSkipLock() { + return skipLock_; + } + /** + * optional bool skip_lock = 4; + */ + public Builder setSkipLock(boolean value) { + bitField0_ |= 0x00000008; + skipLock_ = value; + onChanged(); + return this; + } + /** + * optional bool skip_lock = 4; + */ + public Builder clearSkipLock() { + bitField0_ = (bitField0_ & ~0x00000008); + skipLock_ = false; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:hbase.pb.SetSplitOrMergeEnabledRequest) } @@ -30952,6 +31042,682 @@ public final class MasterProtos { // @@protoc_insertion_point(class_scope:hbase.pb.IsSplitOrMergeEnabledResponse) } + public interface ReleaseSplitOrMergeLockAndRollbackRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.ReleaseSplitOrMergeLockAndRollbackRequest} + */ + public static final class ReleaseSplitOrMergeLockAndRollbackRequest extends + com.google.protobuf.GeneratedMessage + implements ReleaseSplitOrMergeLockAndRollbackRequestOrBuilder { + // Use ReleaseSplitOrMergeLockAndRollbackRequest.newBuilder() to construct. 
+ private ReleaseSplitOrMergeLockAndRollbackRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ReleaseSplitOrMergeLockAndRollbackRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ReleaseSplitOrMergeLockAndRollbackRequest defaultInstance; + public static ReleaseSplitOrMergeLockAndRollbackRequest getDefaultInstance() { + return defaultInstance; + } + + public ReleaseSplitOrMergeLockAndRollbackRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ReleaseSplitOrMergeLockAndRollbackRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ReleaseSplitOrMergeLockAndRollbackRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ReleaseSplitOrMergeLockAndRollbackRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int 
memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ReleaseSplitOrMergeLockAndRollbackRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance(); + } + + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.ReleaseSplitOrMergeLockAndRollbackRequest) + } + + static { + defaultInstance = new ReleaseSplitOrMergeLockAndRollbackRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ReleaseSplitOrMergeLockAndRollbackRequest) + } + + public interface ReleaseSplitOrMergeLockAndRollbackResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.ReleaseSplitOrMergeLockAndRollbackResponse} + */ + public static final class ReleaseSplitOrMergeLockAndRollbackResponse extends + com.google.protobuf.GeneratedMessage + implements ReleaseSplitOrMergeLockAndRollbackResponseOrBuilder { + // Use ReleaseSplitOrMergeLockAndRollbackResponse.newBuilder() to construct. 
+ private ReleaseSplitOrMergeLockAndRollbackResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ReleaseSplitOrMergeLockAndRollbackResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ReleaseSplitOrMergeLockAndRollbackResponse defaultInstance; + public static ReleaseSplitOrMergeLockAndRollbackResponse getDefaultInstance() { + return defaultInstance; + } + + public ReleaseSplitOrMergeLockAndRollbackResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ReleaseSplitOrMergeLockAndRollbackResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ReleaseSplitOrMergeLockAndRollbackResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ReleaseSplitOrMergeLockAndRollbackResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int 
memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ReleaseSplitOrMergeLockAndRollbackResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance(); + } + + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.ReleaseSplitOrMergeLockAndRollbackResponse) + } + + static { + defaultInstance = new ReleaseSplitOrMergeLockAndRollbackResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ReleaseSplitOrMergeLockAndRollbackResponse) + } + public interface NormalizeRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { } @@ -59836,6 +60602,19 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc ReleaseSplitOrMergeLockAndRollback(.hbase.pb.ReleaseSplitOrMergeLockAndRollbackRequest) returns (.hbase.pb.ReleaseSplitOrMergeLockAndRollbackResponse); + * + *
+       **
+       * Release lock and rollback state.
+       * 
+ */ + public abstract void releaseSplitOrMergeLockAndRollback( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request, + com.google.protobuf.RpcCallback done); + /** * rpc Normalize(.hbase.pb.NormalizeRequest) returns (.hbase.pb.NormalizeResponse); * @@ -60420,6 +61199,14 @@ public final class MasterProtos { impl.isSplitOrMergeEnabled(controller, request, done); } + @java.lang.Override + public void releaseSplitOrMergeLockAndRollback( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request, + com.google.protobuf.RpcCallback done) { + impl.releaseSplitOrMergeLockAndRollback(controller, request, done); + } + @java.lang.Override public void normalize( com.google.protobuf.RpcController controller, @@ -60735,64 +61522,66 @@ public final class MasterProtos { case 25: return impl.isSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest)request); case 26: - return impl.normalize(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest)request); + return impl.releaseSplitOrMergeLockAndRollback(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest)request); case 27: - return impl.setNormalizerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request); + return impl.normalize(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest)request); case 28: - return impl.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request); + return impl.setNormalizerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request); case 29: - return impl.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request); + return impl.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request); case 30: - return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request); + return impl.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request); case 31: - return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request); + return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request); case 32: - return impl.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request); + return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request); case 33: - return impl.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request); + return impl.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request); case 34: - return impl.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request); + 
return impl.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request); case 35: - return impl.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request); + return impl.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request); case 36: - return impl.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request); + return impl.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request); case 37: - return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request); + return impl.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request); case 38: - return impl.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request); + return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request); case 39: - return impl.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request); + return impl.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request); case 40: - return impl.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request); + return impl.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request); case 41: - return impl.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request); + return impl.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request); case 42: - return impl.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request); + return impl.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request); case 43: - return impl.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request); + return impl.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request); case 44: - return impl.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request); + return impl.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request); case 45: - return impl.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request); + return impl.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request); case 46: - return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request); + return impl.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request); case 47: - return impl.listTableNamesByNamespace(controller, 
(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request); + return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request); case 48: - return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request); + return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request); case 49: - return impl.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request); + return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request); case 50: - return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request); + return impl.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request); case 51: - return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request); + return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request); case 52: - return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request); + return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request); case 53: - return impl.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request); + return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request); case 54: - return impl.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request); + return impl.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request); case 55: + return impl.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request); + case 56: return impl.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request); default: throw new java.lang.AssertionError("Can't get here."); @@ -60861,64 +61650,66 @@ public final class MasterProtos { case 25: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance(); case 26: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance(); case 27: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance(); case 28: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance(); + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance(); case 29: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance(); case 30: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance(); case 31: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance(); case 32: - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); case 33: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); case 34: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance(); case 35: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance(); case 36: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance(); case 37: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance(); case 38: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance(); case 39: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 40: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 41: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); case 42: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance(); case 43: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance(); case 44: - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance(); case 45: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); case 46: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); case 47: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); case 48: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); case 49: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); case 50: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); case 51: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); case 52: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); case 53: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); case 54: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); case 55: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); + case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -60987,64 +61778,66 @@ public final class MasterProtos { case 25: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance(); case 26: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance(); case 27: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(); + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(); case 28: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(); case 29: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(); case 30: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(); case 31: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(); case 32: - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); case 33: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); case 34: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(); case 35: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(); case 36: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(); case 37: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(); case 38: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(); case 39: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 40: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 41: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); case 42: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(); case 43: - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(); case 44: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(); case 45: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); case 46: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); case 47: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); case 48: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); case 49: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); case 50: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); case 51: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 52: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 53: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); case 54: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); case 55: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); + case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -61383,6 +62176,19 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc ReleaseSplitOrMergeLockAndRollback(.hbase.pb.ReleaseSplitOrMergeLockAndRollbackRequest) returns (.hbase.pb.ReleaseSplitOrMergeLockAndRollbackResponse); + * + *
+     * <pre>
+     **
+     * Release lock and rollback state.
+     * </pre>
+ */ + public abstract void releaseSplitOrMergeLockAndRollback( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request, + com.google.protobuf.RpcCallback done); + /** * rpc Normalize(.hbase.pb.NormalizeRequest) returns (.hbase.pb.NormalizeResponse); * @@ -61907,151 +62713,156 @@ public final class MasterProtos { done)); return; case 26: + this.releaseSplitOrMergeLockAndRollback(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 27: this.normalize(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 27: + case 28: this.setNormalizerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 28: + case 29: this.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 29: + case 30: this.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 30: + case 31: this.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 31: + case 32: this.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 32: + case 33: this.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 33: + case 34: this.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 34: + case 35: this.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 35: + case 36: this.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 36: + case 37: this.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 37: + case 38: this.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 38: + case 39: this.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 39: + case 40: this.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request, 
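For context, the client-facing flow this new service method supports, per the Admin-side additions earlier in this patch, looks roughly like the sketch below. The wrapper class and its method name are hypothetical illustration; only setSplitOrMergeEnabled and releaseSplitOrMergeLockAndRollback come from the patch itself.

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    final class SwitchLockExample {  // hypothetical helper, not part of this patch
      static void runWithSplitsAndMergesDisabled(Connection connection) throws IOException {
        try (Admin admin = connection.getAdmin()) {
          // skipLock = false: take the master-side switch lock so other callers'
          // requests to flip the switches are rejected while we hold it
          admin.setSplitOrMergeEnabled(false, true, false,
              Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
          try {
            // ... work that relies on splits/merges staying disabled ...
          } finally {
            // release the lock and roll the switches back to their previous state
            admin.releaseSplitOrMergeLockAndRollback();
          }
        }
      }
    }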
com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 40: + case 41: this.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 41: + case 42: this.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 42: + case 43: this.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 43: + case 44: this.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 44: + case 45: this.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 45: + case 46: this.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 46: + case 47: this.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 47: + case 48: this.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 48: + case 49: this.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 49: + case 50: this.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 50: + case 51: this.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 51: + case 52: this.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 52: + case 53: this.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 53: + case 54: this.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 54: + case 55: this.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 55: + case 56: this.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); @@ -62123,64 
+62934,66 @@ public final class MasterProtos { case 25: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance(); case 26: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance(); case 27: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance(); case 28: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance(); case 29: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance(); case 30: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance(); case 31: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance(); case 32: - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); case 33: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); case 34: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance(); case 35: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance(); case 36: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance(); case 37: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance(); case 38: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance(); case 39: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 40: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 41: - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); case 42: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance(); case 43: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance(); case 44: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance(); case 45: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); case 46: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); case 47: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); case 48: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); case 49: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); case 50: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); case 51: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); case 52: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); case 53: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); case 54: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); case 55: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); + case 56: return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -62249,64 +63062,66 @@ public final class MasterProtos { case 25: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance(); case 26: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance(); case 27: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(); case 28: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(); case 29: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(); case 30: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(); case 31: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(); case 32: - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); case 33: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); case 34: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(); case 35: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(); case 36: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(); case 37: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(); case 38: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(); case 39: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 40: - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 41: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); case 42: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(); case 43: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(); case 44: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(); case 45: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); case 46: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); case 47: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); case 48: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); case 49: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); case 50: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); case 51: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 52: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 53: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); case 54: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); case 55: + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); + case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -62719,12 +63534,27 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance())); } + public void releaseSplitOrMergeLockAndRollback( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(26), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance())); + } + public void normalize( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(26), + getDescriptor().getMethods().get(27), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(), @@ -62739,7 +63569,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(27), + getDescriptor().getMethods().get(28), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(), @@ -62754,7 +63584,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(28), + getDescriptor().getMethods().get(29), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(), @@ -62769,7 +63599,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(29), + getDescriptor().getMethods().get(30), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(), @@ -62784,7 +63614,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(30), + getDescriptor().getMethods().get(31), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(), @@ -62799,7 +63629,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(31), + 
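A minimal sketch of driving the new RPC through the generated non-blocking Stub wired above (method index 26). The wrapper class is hypothetical; channel is an assumed, already-connected com.google.protobuf.RpcChannel, and the null RpcController is for brevity only.

    import com.google.protobuf.RpcCallback;
    import com.google.protobuf.RpcChannel;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

    final class ReleaseLockAsyncExample {  // hypothetical helper, not part of this patch
      static void releaseAsync(RpcChannel channel) {
        MasterProtos.MasterService.Stub stub = MasterProtos.MasterService.newStub(channel);
        stub.releaseSplitOrMergeLockAndRollback(
            null,  // RpcController, omitted for brevity
            MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance(),
            new RpcCallback<MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse>() {
              @Override
              public void run(MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse response) {
                // master has released the switch lock and rolled back the switch state
              }
            });
      }
    }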
getDescriptor().getMethods().get(32), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(), @@ -62814,7 +63644,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(32), + getDescriptor().getMethods().get(33), controller, request, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(), @@ -62829,7 +63659,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(33), + getDescriptor().getMethods().get(34), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(), @@ -62844,7 +63674,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(34), + getDescriptor().getMethods().get(35), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(), @@ -62859,7 +63689,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(35), + getDescriptor().getMethods().get(36), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(), @@ -62874,7 +63704,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(36), + getDescriptor().getMethods().get(37), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(), @@ -62889,7 +63719,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(37), + getDescriptor().getMethods().get(38), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(), @@ -62904,7 +63734,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(38), + getDescriptor().getMethods().get(39), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(), @@ -62919,7 +63749,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(39), + getDescriptor().getMethods().get(40), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(), @@ -62934,7 +63764,7 @@ public final class MasterProtos { 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(40), + getDescriptor().getMethods().get(41), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(), @@ -62949,7 +63779,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(41), + getDescriptor().getMethods().get(42), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(), @@ -62964,7 +63794,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(42), + getDescriptor().getMethods().get(43), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(), @@ -62979,7 +63809,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(43), + getDescriptor().getMethods().get(44), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(), @@ -62994,7 +63824,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(44), + getDescriptor().getMethods().get(45), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(), @@ -63009,7 +63839,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(45), + getDescriptor().getMethods().get(46), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(), @@ -63024,7 +63854,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(46), + getDescriptor().getMethods().get(47), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(), @@ -63039,7 +63869,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(47), + getDescriptor().getMethods().get(48), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(), @@ -63054,7 +63884,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(48), + 
getDescriptor().getMethods().get(49), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(), @@ -63069,7 +63899,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(49), + getDescriptor().getMethods().get(50), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(), @@ -63084,7 +63914,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(50), + getDescriptor().getMethods().get(51), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(), @@ -63099,7 +63929,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(51), + getDescriptor().getMethods().get(52), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(), @@ -63114,7 +63944,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(52), + getDescriptor().getMethods().get(53), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(), @@ -63129,7 +63959,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(53), + getDescriptor().getMethods().get(54), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(), @@ -63144,7 +63974,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(54), + getDescriptor().getMethods().get(55), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(), @@ -63159,7 +63989,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(55), + getDescriptor().getMethods().get(56), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(), @@ -63306,6 +64136,11 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request) throws com.google.protobuf.ServiceException; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse releaseSplitOrMergeLockAndRollback( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest 
request) + throws com.google.protobuf.ServiceException; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse normalize( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest request) @@ -63776,12 +64611,24 @@ public final class MasterProtos { } + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse releaseSplitOrMergeLockAndRollback( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(26), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance()); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse normalize( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(26), + getDescriptor().getMethods().get(27), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance()); @@ -63793,7 +64640,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(27), + getDescriptor().getMethods().get(28), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance()); @@ -63805,7 +64652,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(28), + getDescriptor().getMethods().get(29), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance()); @@ -63817,7 +64664,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(29), + getDescriptor().getMethods().get(30), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance()); @@ -63829,7 +64676,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(30), + 
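And the blocking flavor, mirroring the callBlockingMethod wiring added just above; again the wrapper class is hypothetical and channel is an assumed com.google.protobuf.BlockingRpcChannel.

    import com.google.protobuf.BlockingRpcChannel;
    import com.google.protobuf.ServiceException;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

    final class ReleaseLockBlockingExample {  // hypothetical helper, not part of this patch
      static MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse release(
          BlockingRpcChannel channel) throws ServiceException {
        MasterProtos.MasterService.BlockingInterface master =
            MasterProtos.MasterService.newBlockingStub(channel);
        return master.releaseSplitOrMergeLockAndRollback(
            null,  // RpcController, omitted for brevity
            MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance());
      }
    }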
getDescriptor().getMethods().get(31), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance()); @@ -63841,7 +64688,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(31), + getDescriptor().getMethods().get(32), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance()); @@ -63853,7 +64700,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(32), + getDescriptor().getMethods().get(33), controller, request, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance()); @@ -63865,7 +64712,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(33), + getDescriptor().getMethods().get(34), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance()); @@ -63877,7 +64724,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(34), + getDescriptor().getMethods().get(35), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance()); @@ -63889,7 +64736,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(35), + getDescriptor().getMethods().get(36), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance()); @@ -63901,7 +64748,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(36), + getDescriptor().getMethods().get(37), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance()); @@ -63913,7 +64760,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse) 
channel.callBlockingMethod( - getDescriptor().getMethods().get(37), + getDescriptor().getMethods().get(38), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance()); @@ -63925,7 +64772,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(38), + getDescriptor().getMethods().get(39), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance()); @@ -63937,7 +64784,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(39), + getDescriptor().getMethods().get(40), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance()); @@ -63949,7 +64796,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(40), + getDescriptor().getMethods().get(41), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance()); @@ -63961,7 +64808,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(41), + getDescriptor().getMethods().get(42), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance()); @@ -63973,7 +64820,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(42), + getDescriptor().getMethods().get(43), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance()); @@ -63985,7 +64832,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(43), + getDescriptor().getMethods().get(44), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance()); @@ -63997,7 +64844,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request) throws com.google.protobuf.ServiceException { return 
(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(44), + getDescriptor().getMethods().get(45), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance()); @@ -64009,7 +64856,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(45), + getDescriptor().getMethods().get(46), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance()); @@ -64021,7 +64868,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(46), + getDescriptor().getMethods().get(47), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance()); @@ -64033,7 +64880,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(47), + getDescriptor().getMethods().get(48), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance()); @@ -64045,7 +64892,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(48), + getDescriptor().getMethods().get(49), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()); @@ -64057,7 +64904,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(49), + getDescriptor().getMethods().get(50), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance()); @@ -64069,7 +64916,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(50), + getDescriptor().getMethods().get(51), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()); @@ -64081,7 +64928,7 @@ public final class 
MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(51), + getDescriptor().getMethods().get(52), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()); @@ -64093,7 +64940,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(52), + getDescriptor().getMethods().get(53), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance()); @@ -64105,7 +64952,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(53), + getDescriptor().getMethods().get(54), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance()); @@ -64117,7 +64964,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(54), + getDescriptor().getMethods().get(55), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance()); @@ -64129,7 +64976,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(55), + getDescriptor().getMethods().get(56), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()); @@ -64420,6 +65267,16 @@ public final class MasterProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_IsSplitOrMergeEnabledResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_NormalizeRequest_descriptor; private static @@ -64786,238 +65643,244 @@ public 
final class MasterProtos { "ncerRunningResponse\022\032\n\022prev_balance_valu", "e\030\001 \001(\010\"\032\n\030IsBalancerEnabledRequest\",\n\031I" + "sBalancerEnabledResponse\022\017\n\007enabled\030\001 \002(" + - "\010\"w\n\035SetSplitOrMergeEnabledRequest\022\017\n\007en" + - "abled\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\0220\n\014swit" + - "ch_types\030\003 \003(\0162\032.hbase.pb.MasterSwitchTy" + - "pe\"4\n\036SetSplitOrMergeEnabledResponse\022\022\n\n" + - "prev_value\030\001 \003(\010\"O\n\034IsSplitOrMergeEnable" + - "dRequest\022/\n\013switch_type\030\001 \002(\0162\032.hbase.pb" + - ".MasterSwitchType\"0\n\035IsSplitOrMergeEnabl" + - "edResponse\022\017\n\007enabled\030\001 \002(\010\"\022\n\020Normalize", - "Request\"+\n\021NormalizeResponse\022\026\n\016normaliz" + - "er_ran\030\001 \002(\010\")\n\033SetNormalizerRunningRequ" + - "est\022\n\n\002on\030\001 \002(\010\"=\n\034SetNormalizerRunningR" + - "esponse\022\035\n\025prev_normalizer_value\030\001 \001(\010\"\034" + - "\n\032IsNormalizerEnabledRequest\".\n\033IsNormal" + - "izerEnabledResponse\022\017\n\007enabled\030\001 \002(\010\"\027\n\025" + - "RunCatalogScanRequest\"-\n\026RunCatalogScanR" + - "esponse\022\023\n\013scan_result\030\001 \001(\005\"-\n\033EnableCa" + - "talogJanitorRequest\022\016\n\006enable\030\001 \002(\010\"2\n\034E" + - "nableCatalogJanitorResponse\022\022\n\nprev_valu", - "e\030\001 \001(\010\" \n\036IsCatalogJanitorEnabledReques" + - "t\"0\n\037IsCatalogJanitorEnabledResponse\022\r\n\005" + - "value\030\001 \002(\010\"B\n\017SnapshotRequest\022/\n\010snapsh" + - "ot\030\001 \002(\0132\035.hbase.pb.SnapshotDescription\"" + - ",\n\020SnapshotResponse\022\030\n\020expected_timeout\030" + - "\001 \002(\003\"\036\n\034GetCompletedSnapshotsRequest\"Q\n" + - "\035GetCompletedSnapshotsResponse\0220\n\tsnapsh" + - "ots\030\001 \003(\0132\035.hbase.pb.SnapshotDescription" + - "\"H\n\025DeleteSnapshotRequest\022/\n\010snapshot\030\001 " + - "\002(\0132\035.hbase.pb.SnapshotDescription\"\030\n\026De", - "leteSnapshotResponse\"s\n\026RestoreSnapshotR" + - "equest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.Snap" + - "shotDescription\022\026\n\013nonce_group\030\002 \001(\004:\0010\022" + - "\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027RestoreSnapshotResp" + - "onse\022\017\n\007proc_id\030\001 \002(\004\"H\n\025IsSnapshotDoneR" + - "equest\022/\n\010snapshot\030\001 \001(\0132\035.hbase.pb.Snap" + - "shotDescription\"^\n\026IsSnapshotDoneRespons" + - "e\022\023\n\004done\030\001 \001(\010:\005false\022/\n\010snapshot\030\002 \001(\013" + - "2\035.hbase.pb.SnapshotDescription\"O\n\034IsRes" + - "toreSnapshotDoneRequest\022/\n\010snapshot\030\001 \001(", - "\0132\035.hbase.pb.SnapshotDescription\"4\n\035IsRe" + - "storeSnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:" + - "\005false\"F\n\033GetSchemaAlterStatusRequest\022\'\n" + - "\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\"T" + - "\n\034GetSchemaAlterStatusResponse\022\035\n\025yet_to" + - "_update_regions\030\001 \001(\r\022\025\n\rtotal_regions\030\002" + - " \001(\r\"\213\001\n\032GetTableDescriptorsRequest\022(\n\013t" + - "able_names\030\001 \003(\0132\023.hbase.pb.TableName\022\r\n" + - "\005regex\030\002 \001(\t\022!\n\022include_sys_tables\030\003 \001(\010" + - ":\005false\022\021\n\tnamespace\030\004 \001(\t\"J\n\033GetTableDe", - "scriptorsResponse\022+\n\014table_schema\030\001 \003(\0132" + - "\025.hbase.pb.TableSchema\"[\n\024GetTableNamesR" + - "equest\022\r\n\005regex\030\001 
\001(\t\022!\n\022include_sys_tab" + - "les\030\002 \001(\010:\005false\022\021\n\tnamespace\030\003 \001(\t\"A\n\025G" + - "etTableNamesResponse\022(\n\013table_names\030\001 \003(" + - "\0132\023.hbase.pb.TableName\"?\n\024GetTableStateR" + - "equest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Ta" + - "bleName\"B\n\025GetTableStateResponse\022)\n\013tabl" + - "e_state\030\001 \002(\0132\024.hbase.pb.TableState\"\031\n\027G" + - "etClusterStatusRequest\"K\n\030GetClusterStat", - "usResponse\022/\n\016cluster_status\030\001 \002(\0132\027.hba" + - "se.pb.ClusterStatus\"\030\n\026IsMasterRunningRe" + - "quest\"4\n\027IsMasterRunningResponse\022\031\n\021is_m" + - "aster_running\030\001 \002(\010\"I\n\024ExecProcedureRequ" + - "est\0221\n\tprocedure\030\001 \002(\0132\036.hbase.pb.Proced" + - "ureDescription\"F\n\025ExecProcedureResponse\022" + - "\030\n\020expected_timeout\030\001 \001(\003\022\023\n\013return_data" + - "\030\002 \001(\014\"K\n\026IsProcedureDoneRequest\0221\n\tproc" + - "edure\030\001 \001(\0132\036.hbase.pb.ProcedureDescript" + - "ion\"`\n\027IsProcedureDoneResponse\022\023\n\004done\030\001", - " \001(\010:\005false\0220\n\010snapshot\030\002 \001(\0132\036.hbase.pb" + - ".ProcedureDescription\",\n\031GetProcedureRes" + - "ultRequest\022\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetProce" + - "dureResultResponse\0229\n\005state\030\001 \002(\0162*.hbas" + - "e.pb.GetProcedureResultResponse.State\022\022\n" + - "\nstart_time\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016" + - "\n\006result\030\004 \001(\014\0224\n\texception\030\005 \001(\0132!.hbas" + - "e.pb.ForeignExceptionMessage\"1\n\005State\022\r\n" + - "\tNOT_FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"" + - "M\n\025AbortProcedureRequest\022\017\n\007proc_id\030\001 \002(", - "\004\022#\n\025mayInterruptIfRunning\030\002 \001(\010:\004true\"6" + - "\n\026AbortProcedureResponse\022\034\n\024is_procedure" + - "_aborted\030\001 \002(\010\"\027\n\025ListProceduresRequest\"" + - "@\n\026ListProceduresResponse\022&\n\tprocedure\030\001" + - " \003(\0132\023.hbase.pb.Procedure\"\315\001\n\017SetQuotaRe" + - "quest\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nuser_group\030\002" + - " \001(\t\022\021\n\tnamespace\030\003 \001(\t\022\'\n\ntable_name\030\004 " + - "\001(\0132\023.hbase.pb.TableName\022\022\n\nremove_all\030\005" + - " \001(\010\022\026\n\016bypass_globals\030\006 \001(\010\022+\n\010throttle" + - "\030\007 \001(\0132\031.hbase.pb.ThrottleRequest\"\022\n\020Set", - "QuotaResponse\"J\n\037MajorCompactionTimestam" + - "pRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb." 
+ - "TableName\"U\n(MajorCompactionTimestampFor" + - "RegionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb" + - ".RegionSpecifier\"@\n MajorCompactionTimes" + - "tampResponse\022\034\n\024compaction_timestamp\030\001 \002" + - "(\003\"\035\n\033SecurityCapabilitiesRequest\"\354\001\n\034Se" + - "curityCapabilitiesResponse\022G\n\014capabiliti" + - "es\030\001 \003(\01621.hbase.pb.SecurityCapabilities" + - "Response.Capability\"\202\001\n\nCapability\022\031\n\025SI", - "MPLE_AUTHENTICATION\020\000\022\031\n\025SECURE_AUTHENTI" + - "CATION\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTH" + - "ORIZATION\020\003\022\023\n\017CELL_VISIBILITY\020\004*(\n\020Mast" + - "erSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\351\'\n\rM" + - "asterService\022e\n\024GetSchemaAlterStatus\022%.h" + - "base.pb.GetSchemaAlterStatusRequest\032&.hb" + - "ase.pb.GetSchemaAlterStatusResponse\022b\n\023G" + - "etTableDescriptors\022$.hbase.pb.GetTableDe" + - "scriptorsRequest\032%.hbase.pb.GetTableDesc" + - "riptorsResponse\022P\n\rGetTableNames\022\036.hbase", - ".pb.GetTableNamesRequest\032\037.hbase.pb.GetT" + - "ableNamesResponse\022Y\n\020GetClusterStatus\022!." + - "hbase.pb.GetClusterStatusRequest\032\".hbase" + - ".pb.GetClusterStatusResponse\022V\n\017IsMaster" + - "Running\022 .hbase.pb.IsMasterRunningReques" + - "t\032!.hbase.pb.IsMasterRunningResponse\022D\n\t" + - "AddColumn\022\032.hbase.pb.AddColumnRequest\032\033." + - "hbase.pb.AddColumnResponse\022M\n\014DeleteColu" + - "mn\022\035.hbase.pb.DeleteColumnRequest\032\036.hbas" + - "e.pb.DeleteColumnResponse\022M\n\014ModifyColum", - "n\022\035.hbase.pb.ModifyColumnRequest\032\036.hbase" + - ".pb.ModifyColumnResponse\022G\n\nMoveRegion\022\033" + - ".hbase.pb.MoveRegionRequest\032\034.hbase.pb.M" + - "oveRegionResponse\022k\n\026DispatchMergingRegi" + - "ons\022\'.hbase.pb.DispatchMergingRegionsReq" + - "uest\032(.hbase.pb.DispatchMergingRegionsRe" + - "sponse\022M\n\014AssignRegion\022\035.hbase.pb.Assign" + - "RegionRequest\032\036.hbase.pb.AssignRegionRes" + - "ponse\022S\n\016UnassignRegion\022\037.hbase.pb.Unass" + - "ignRegionRequest\032 .hbase.pb.UnassignRegi", - "onResponse\022P\n\rOfflineRegion\022\036.hbase.pb.O" + - "fflineRegionRequest\032\037.hbase.pb.OfflineRe" + - "gionResponse\022J\n\013DeleteTable\022\034.hbase.pb.D" + - "eleteTableRequest\032\035.hbase.pb.DeleteTable" + - "Response\022P\n\rtruncateTable\022\036.hbase.pb.Tru" + - "ncateTableRequest\032\037.hbase.pb.TruncateTab" + - "leResponse\022J\n\013EnableTable\022\034.hbase.pb.Ena" + - "bleTableRequest\032\035.hbase.pb.EnableTableRe" + - "sponse\022M\n\014DisableTable\022\035.hbase.pb.Disabl" + - "eTableRequest\032\036.hbase.pb.DisableTableRes", - "ponse\022J\n\013ModifyTable\022\034.hbase.pb.ModifyTa" + - "bleRequest\032\035.hbase.pb.ModifyTableRespons" + - "e\022J\n\013CreateTable\022\034.hbase.pb.CreateTableR" + - "equest\032\035.hbase.pb.CreateTableResponse\022A\n" + - "\010Shutdown\022\031.hbase.pb.ShutdownRequest\032\032.h" + - "base.pb.ShutdownResponse\022G\n\nStopMaster\022\033" + - ".hbase.pb.StopMasterRequest\032\034.hbase.pb.S" + - "topMasterResponse\022>\n\007Balance\022\030.hbase.pb." 
+ - "BalanceRequest\032\031.hbase.pb.BalanceRespons" + - "e\022_\n\022SetBalancerRunning\022#.hbase.pb.SetBa", - "lancerRunningRequest\032$.hbase.pb.SetBalan" + - "cerRunningResponse\022\\\n\021IsBalancerEnabled\022" + - "\".hbase.pb.IsBalancerEnabledRequest\032#.hb" + - "ase.pb.IsBalancerEnabledResponse\022k\n\026SetS" + - "plitOrMergeEnabled\022\'.hbase.pb.SetSplitOr" + - "MergeEnabledRequest\032(.hbase.pb.SetSplitO" + - "rMergeEnabledResponse\022h\n\025IsSplitOrMergeE" + - "nabled\022&.hbase.pb.IsSplitOrMergeEnabledR" + - "equest\032\'.hbase.pb.IsSplitOrMergeEnabledR" + - "esponse\022D\n\tNormalize\022\032.hbase.pb.Normaliz", - "eRequest\032\033.hbase.pb.NormalizeResponse\022e\n" + - "\024SetNormalizerRunning\022%.hbase.pb.SetNorm" + - "alizerRunningRequest\032&.hbase.pb.SetNorma" + - "lizerRunningResponse\022b\n\023IsNormalizerEnab" + - "led\022$.hbase.pb.IsNormalizerEnabledReques" + - "t\032%.hbase.pb.IsNormalizerEnabledResponse" + - "\022S\n\016RunCatalogScan\022\037.hbase.pb.RunCatalog" + - "ScanRequest\032 .hbase.pb.RunCatalogScanRes" + - "ponse\022e\n\024EnableCatalogJanitor\022%.hbase.pb" + - ".EnableCatalogJanitorRequest\032&.hbase.pb.", - "EnableCatalogJanitorResponse\022n\n\027IsCatalo" + - "gJanitorEnabled\022(.hbase.pb.IsCatalogJani" + - "torEnabledRequest\032).hbase.pb.IsCatalogJa" + - "nitorEnabledResponse\022^\n\021ExecMasterServic" + - "e\022#.hbase.pb.CoprocessorServiceRequest\032$" + - ".hbase.pb.CoprocessorServiceResponse\022A\n\010" + - "Snapshot\022\031.hbase.pb.SnapshotRequest\032\032.hb" + - "ase.pb.SnapshotResponse\022h\n\025GetCompletedS" + - "napshots\022&.hbase.pb.GetCompletedSnapshot" + - "sRequest\032\'.hbase.pb.GetCompletedSnapshot", - "sResponse\022S\n\016DeleteSnapshot\022\037.hbase.pb.D" + - "eleteSnapshotRequest\032 .hbase.pb.DeleteSn" + - "apshotResponse\022S\n\016IsSnapshotDone\022\037.hbase" + - ".pb.IsSnapshotDoneRequest\032 .hbase.pb.IsS" + - "napshotDoneResponse\022V\n\017RestoreSnapshot\022 " + - ".hbase.pb.RestoreSnapshotRequest\032!.hbase" + - ".pb.RestoreSnapshotResponse\022P\n\rExecProce" + - "dure\022\036.hbase.pb.ExecProcedureRequest\032\037.h" + - "base.pb.ExecProcedureResponse\022W\n\024ExecPro" + - "cedureWithRet\022\036.hbase.pb.ExecProcedureRe", - "quest\032\037.hbase.pb.ExecProcedureResponse\022V" + - "\n\017IsProcedureDone\022 .hbase.pb.IsProcedure" + - "DoneRequest\032!.hbase.pb.IsProcedureDoneRe" + - "sponse\022V\n\017ModifyNamespace\022 .hbase.pb.Mod" + - "ifyNamespaceRequest\032!.hbase.pb.ModifyNam" + - "espaceResponse\022V\n\017CreateNamespace\022 .hbas" + - "e.pb.CreateNamespaceRequest\032!.hbase.pb.C" + - "reateNamespaceResponse\022V\n\017DeleteNamespac" + - "e\022 .hbase.pb.DeleteNamespaceRequest\032!.hb" + - "ase.pb.DeleteNamespaceResponse\022k\n\026GetNam", - "espaceDescriptor\022\'.hbase.pb.GetNamespace" + - "DescriptorRequest\032(.hbase.pb.GetNamespac" + - "eDescriptorResponse\022q\n\030ListNamespaceDesc" + - "riptors\022).hbase.pb.ListNamespaceDescript" + - "orsRequest\032*.hbase.pb.ListNamespaceDescr" + - "iptorsResponse\022\206\001\n\037ListTableDescriptorsB" + - "yNamespace\0220.hbase.pb.ListTableDescripto" + - "rsByNamespaceRequest\0321.hbase.pb.ListTabl" + - "eDescriptorsByNamespaceResponse\022t\n\031ListT" + - "ableNamesByNamespace\022*.hbase.pb.ListTabl", - "eNamesByNamespaceRequest\032+.hbase.pb.List" + - "TableNamesByNamespaceResponse\022P\n\rGetTabl" + - "eState\022\036.hbase.pb.GetTableStateRequest\032\037" + - ".hbase.pb.GetTableStateResponse\022A\n\010SetQu" + - 
"ota\022\031.hbase.pb.SetQuotaRequest\032\032.hbase.p" + - "b.SetQuotaResponse\022x\n\037getLastMajorCompac" + - "tionTimestamp\022).hbase.pb.MajorCompaction" + - "TimestampRequest\032*.hbase.pb.MajorCompact" + - "ionTimestampResponse\022\212\001\n(getLastMajorCom" + - "pactionTimestampForRegion\0222.hbase.pb.Maj", - "orCompactionTimestampForRegionRequest\032*." + - "hbase.pb.MajorCompactionTimestampRespons" + - "e\022_\n\022getProcedureResult\022#.hbase.pb.GetPr" + - "ocedureResultRequest\032$.hbase.pb.GetProce" + - "dureResultResponse\022h\n\027getSecurityCapabil" + - "ities\022%.hbase.pb.SecurityCapabilitiesReq" + - "uest\032&.hbase.pb.SecurityCapabilitiesResp" + - "onse\022S\n\016AbortProcedure\022\037.hbase.pb.AbortP" + - "rocedureRequest\032 .hbase.pb.AbortProcedur" + - "eResponse\022S\n\016ListProcedures\022\037.hbase.pb.L", - "istProceduresRequest\032 .hbase.pb.ListProc" + - "eduresResponseBB\n*org.apache.hadoop.hbas" + - "e.protobuf.generatedB\014MasterProtosH\001\210\001\001\240" + - "\001\001" + "\010\"\212\001\n\035SetSplitOrMergeEnabledRequest\022\017\n\007e" + + "nabled\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\0220\n\014swi" + + "tch_types\030\003 \003(\0162\032.hbase.pb.MasterSwitchT" + + "ype\022\021\n\tskip_lock\030\004 \001(\010\"4\n\036SetSplitOrMerg" + + "eEnabledResponse\022\022\n\nprev_value\030\001 \003(\010\"O\n\034" + + "IsSplitOrMergeEnabledRequest\022/\n\013switch_t" + + "ype\030\001 \002(\0162\032.hbase.pb.MasterSwitchType\"0\n" + + "\035IsSplitOrMergeEnabledResponse\022\017\n\007enable", + "d\030\001 \002(\010\"+\n)ReleaseSplitOrMergeLockAndRol" + + "lbackRequest\",\n*ReleaseSplitOrMergeLockA" + + "ndRollbackResponse\"\022\n\020NormalizeRequest\"+" + + "\n\021NormalizeResponse\022\026\n\016normalizer_ran\030\001 " + + "\002(\010\")\n\033SetNormalizerRunningRequest\022\n\n\002on" + + "\030\001 \002(\010\"=\n\034SetNormalizerRunningResponse\022\035" + + "\n\025prev_normalizer_value\030\001 \001(\010\"\034\n\032IsNorma" + + "lizerEnabledRequest\".\n\033IsNormalizerEnabl" + + "edResponse\022\017\n\007enabled\030\001 \002(\010\"\027\n\025RunCatalo" + + "gScanRequest\"-\n\026RunCatalogScanResponse\022\023", + "\n\013scan_result\030\001 \001(\005\"-\n\033EnableCatalogJani" + + "torRequest\022\016\n\006enable\030\001 \002(\010\"2\n\034EnableCata" + + "logJanitorResponse\022\022\n\nprev_value\030\001 \001(\010\" " + + "\n\036IsCatalogJanitorEnabledRequest\"0\n\037IsCa" + + "talogJanitorEnabledResponse\022\r\n\005value\030\001 \002" + + "(\010\"B\n\017SnapshotRequest\022/\n\010snapshot\030\001 \002(\0132" + + "\035.hbase.pb.SnapshotDescription\",\n\020Snapsh" + + "otResponse\022\030\n\020expected_timeout\030\001 \002(\003\"\036\n\034" + + "GetCompletedSnapshotsRequest\"Q\n\035GetCompl" + + "etedSnapshotsResponse\0220\n\tsnapshots\030\001 \003(\013", + "2\035.hbase.pb.SnapshotDescription\"H\n\025Delet" + + "eSnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.hba" + + "se.pb.SnapshotDescription\"\030\n\026DeleteSnaps" + + "hotResponse\"s\n\026RestoreSnapshotRequest\022/\n" + + "\010snapshot\030\001 \002(\0132\035.hbase.pb.SnapshotDescr" + + "iption\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030" + + "\003 \001(\004:\0010\"*\n\027RestoreSnapshotResponse\022\017\n\007p" + + "roc_id\030\001 \002(\004\"H\n\025IsSnapshotDoneRequest\022/\n" + + "\010snapshot\030\001 \001(\0132\035.hbase.pb.SnapshotDescr" + + "iption\"^\n\026IsSnapshotDoneResponse\022\023\n\004done", + "\030\001 \001(\010:\005false\022/\n\010snapshot\030\002 
\001(\0132\035.hbase." + + "pb.SnapshotDescription\"O\n\034IsRestoreSnaps" + + "hotDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.hbase" + + ".pb.SnapshotDescription\"4\n\035IsRestoreSnap" + + "shotDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\"F\n" + + "\033GetSchemaAlterStatusRequest\022\'\n\ntable_na" + + "me\030\001 \002(\0132\023.hbase.pb.TableName\"T\n\034GetSche" + + "maAlterStatusResponse\022\035\n\025yet_to_update_r" + + "egions\030\001 \001(\r\022\025\n\rtotal_regions\030\002 \001(\r\"\213\001\n\032" + + "GetTableDescriptorsRequest\022(\n\013table_name", + "s\030\001 \003(\0132\023.hbase.pb.TableName\022\r\n\005regex\030\002 " + + "\001(\t\022!\n\022include_sys_tables\030\003 \001(\010:\005false\022\021" + + "\n\tnamespace\030\004 \001(\t\"J\n\033GetTableDescriptors" + + "Response\022+\n\014table_schema\030\001 \003(\0132\025.hbase.p" + + "b.TableSchema\"[\n\024GetTableNamesRequest\022\r\n" + + "\005regex\030\001 \001(\t\022!\n\022include_sys_tables\030\002 \001(\010" + + ":\005false\022\021\n\tnamespace\030\003 \001(\t\"A\n\025GetTableNa" + + "mesResponse\022(\n\013table_names\030\001 \003(\0132\023.hbase" + + ".pb.TableName\"?\n\024GetTableStateRequest\022\'\n" + + "\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\"B", + "\n\025GetTableStateResponse\022)\n\013table_state\030\001" + + " \002(\0132\024.hbase.pb.TableState\"\031\n\027GetCluster" + + "StatusRequest\"K\n\030GetClusterStatusRespons" + + "e\022/\n\016cluster_status\030\001 \002(\0132\027.hbase.pb.Clu" + + "sterStatus\"\030\n\026IsMasterRunningRequest\"4\n\027" + + "IsMasterRunningResponse\022\031\n\021is_master_run" + + "ning\030\001 \002(\010\"I\n\024ExecProcedureRequest\0221\n\tpr" + + "ocedure\030\001 \002(\0132\036.hbase.pb.ProcedureDescri" + + "ption\"F\n\025ExecProcedureResponse\022\030\n\020expect" + + "ed_timeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n", + "\026IsProcedureDoneRequest\0221\n\tprocedure\030\001 \001" + + "(\0132\036.hbase.pb.ProcedureDescription\"`\n\027Is" + + "ProcedureDoneResponse\022\023\n\004done\030\001 \001(\010:\005fal" + + "se\0220\n\010snapshot\030\002 \001(\0132\036.hbase.pb.Procedur" + + "eDescription\",\n\031GetProcedureResultReques" + + "t\022\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetProcedureResul" + + "tResponse\0229\n\005state\030\001 \002(\0162*.hbase.pb.GetP" + + "rocedureResultResponse.State\022\022\n\nstart_ti" + + "me\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006result\030" + + "\004 \001(\014\0224\n\texception\030\005 \001(\0132!.hbase.pb.Fore", + "ignExceptionMessage\"1\n\005State\022\r\n\tNOT_FOUN" + + "D\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortP" + + "rocedureRequest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayI" + + "nterruptIfRunning\030\002 \001(\010:\004true\"6\n\026AbortPr" + + "ocedureResponse\022\034\n\024is_procedure_aborted\030" + + "\001 \002(\010\"\027\n\025ListProceduresRequest\"@\n\026ListPr" + + "oceduresResponse\022&\n\tprocedure\030\001 \003(\0132\023.hb" + + "ase.pb.Procedure\"\315\001\n\017SetQuotaRequest\022\021\n\t" + + "user_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tn" + + "amespace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hba", + "se.pb.TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016b" + + "ypass_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031." 
+ + "hbase.pb.ThrottleRequest\"\022\n\020SetQuotaResp" + + "onse\"J\n\037MajorCompactionTimestampRequest\022" + + "\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName" + + "\"U\n(MajorCompactionTimestampForRegionReq" + + "uest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSp" + + "ecifier\"@\n MajorCompactionTimestampRespo" + + "nse\022\034\n\024compaction_timestamp\030\001 \002(\003\"\035\n\033Sec" + + "urityCapabilitiesRequest\"\354\001\n\034SecurityCap", + "abilitiesResponse\022G\n\014capabilities\030\001 \003(\0162" + + "1.hbase.pb.SecurityCapabilitiesResponse." + + "Capability\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTH" + + "ENTICATION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022" + + "\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION" + + "\020\003\022\023\n\017CELL_VISIBILITY\020\004*(\n\020MasterSwitchT" + + "ype\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\373(\n\rMasterServ" + + "ice\022e\n\024GetSchemaAlterStatus\022%.hbase.pb.G" + + "etSchemaAlterStatusRequest\032&.hbase.pb.Ge" + + "tSchemaAlterStatusResponse\022b\n\023GetTableDe", + "scriptors\022$.hbase.pb.GetTableDescriptors" + + "Request\032%.hbase.pb.GetTableDescriptorsRe" + + "sponse\022P\n\rGetTableNames\022\036.hbase.pb.GetTa" + + "bleNamesRequest\032\037.hbase.pb.GetTableNames" + + "Response\022Y\n\020GetClusterStatus\022!.hbase.pb." + + "GetClusterStatusRequest\032\".hbase.pb.GetCl" + + "usterStatusResponse\022V\n\017IsMasterRunning\022 " + + ".hbase.pb.IsMasterRunningRequest\032!.hbase" + + ".pb.IsMasterRunningResponse\022D\n\tAddColumn" + + "\022\032.hbase.pb.AddColumnRequest\032\033.hbase.pb.", + "AddColumnResponse\022M\n\014DeleteColumn\022\035.hbas" + + "e.pb.DeleteColumnRequest\032\036.hbase.pb.Dele" + + "teColumnResponse\022M\n\014ModifyColumn\022\035.hbase" + + ".pb.ModifyColumnRequest\032\036.hbase.pb.Modif" + + "yColumnResponse\022G\n\nMoveRegion\022\033.hbase.pb" + + ".MoveRegionRequest\032\034.hbase.pb.MoveRegion" + + "Response\022k\n\026DispatchMergingRegions\022\'.hba" + + "se.pb.DispatchMergingRegionsRequest\032(.hb" + + "ase.pb.DispatchMergingRegionsResponse\022M\n" + + "\014AssignRegion\022\035.hbase.pb.AssignRegionReq", + "uest\032\036.hbase.pb.AssignRegionResponse\022S\n\016" + + "UnassignRegion\022\037.hbase.pb.UnassignRegion" + + "Request\032 .hbase.pb.UnassignRegionRespons" + + "e\022P\n\rOfflineRegion\022\036.hbase.pb.OfflineReg" + + "ionRequest\032\037.hbase.pb.OfflineRegionRespo" + + "nse\022J\n\013DeleteTable\022\034.hbase.pb.DeleteTabl" + + "eRequest\032\035.hbase.pb.DeleteTableResponse\022" + + "P\n\rtruncateTable\022\036.hbase.pb.TruncateTabl" + + "eRequest\032\037.hbase.pb.TruncateTableRespons" + + "e\022J\n\013EnableTable\022\034.hbase.pb.EnableTableR", + "equest\032\035.hbase.pb.EnableTableResponse\022M\n" + + "\014DisableTable\022\035.hbase.pb.DisableTableReq" + + "uest\032\036.hbase.pb.DisableTableResponse\022J\n\013" + + "ModifyTable\022\034.hbase.pb.ModifyTableReques" + + "t\032\035.hbase.pb.ModifyTableResponse\022J\n\013Crea" + + "teTable\022\034.hbase.pb.CreateTableRequest\032\035." 
+ + "hbase.pb.CreateTableResponse\022A\n\010Shutdown" + + "\022\031.hbase.pb.ShutdownRequest\032\032.hbase.pb.S" + + "hutdownResponse\022G\n\nStopMaster\022\033.hbase.pb" + + ".StopMasterRequest\032\034.hbase.pb.StopMaster", + "Response\022>\n\007Balance\022\030.hbase.pb.BalanceRe" + + "quest\032\031.hbase.pb.BalanceResponse\022_\n\022SetB" + + "alancerRunning\022#.hbase.pb.SetBalancerRun" + + "ningRequest\032$.hbase.pb.SetBalancerRunnin" + + "gResponse\022\\\n\021IsBalancerEnabled\022\".hbase.p" + + "b.IsBalancerEnabledRequest\032#.hbase.pb.Is" + + "BalancerEnabledResponse\022k\n\026SetSplitOrMer" + + "geEnabled\022\'.hbase.pb.SetSplitOrMergeEnab" + + "ledRequest\032(.hbase.pb.SetSplitOrMergeEna" + + "bledResponse\022h\n\025IsSplitOrMergeEnabled\022&.", + "hbase.pb.IsSplitOrMergeEnabledRequest\032\'." + + "hbase.pb.IsSplitOrMergeEnabledResponse\022\217" + + "\001\n\"ReleaseSplitOrMergeLockAndRollback\0223." + + "hbase.pb.ReleaseSplitOrMergeLockAndRollb" + + "ackRequest\0324.hbase.pb.ReleaseSplitOrMerg" + + "eLockAndRollbackResponse\022D\n\tNormalize\022\032." + + "hbase.pb.NormalizeRequest\032\033.hbase.pb.Nor" + + "malizeResponse\022e\n\024SetNormalizerRunning\022%" + + ".hbase.pb.SetNormalizerRunningRequest\032&." + + "hbase.pb.SetNormalizerRunningResponse\022b\n", + "\023IsNormalizerEnabled\022$.hbase.pb.IsNormal" + + "izerEnabledRequest\032%.hbase.pb.IsNormaliz" + + "erEnabledResponse\022S\n\016RunCatalogScan\022\037.hb" + + "ase.pb.RunCatalogScanRequest\032 .hbase.pb." + + "RunCatalogScanResponse\022e\n\024EnableCatalogJ" + + "anitor\022%.hbase.pb.EnableCatalogJanitorRe" + + "quest\032&.hbase.pb.EnableCatalogJanitorRes" + + "ponse\022n\n\027IsCatalogJanitorEnabled\022(.hbase" + + ".pb.IsCatalogJanitorEnabledRequest\032).hba" + + "se.pb.IsCatalogJanitorEnabledResponse\022^\n", + "\021ExecMasterService\022#.hbase.pb.Coprocesso" + + "rServiceRequest\032$.hbase.pb.CoprocessorSe" + + "rviceResponse\022A\n\010Snapshot\022\031.hbase.pb.Sna" + + "pshotRequest\032\032.hbase.pb.SnapshotResponse" + + "\022h\n\025GetCompletedSnapshots\022&.hbase.pb.Get" + + "CompletedSnapshotsRequest\032\'.hbase.pb.Get" + + "CompletedSnapshotsResponse\022S\n\016DeleteSnap" + + "shot\022\037.hbase.pb.DeleteSnapshotRequest\032 ." + + "hbase.pb.DeleteSnapshotResponse\022S\n\016IsSna" + + "pshotDone\022\037.hbase.pb.IsSnapshotDoneReque", + "st\032 .hbase.pb.IsSnapshotDoneResponse\022V\n\017" + + "RestoreSnapshot\022 .hbase.pb.RestoreSnapsh" + + "otRequest\032!.hbase.pb.RestoreSnapshotResp" + + "onse\022P\n\rExecProcedure\022\036.hbase.pb.ExecPro" + + "cedureRequest\032\037.hbase.pb.ExecProcedureRe" + + "sponse\022W\n\024ExecProcedureWithRet\022\036.hbase.p" + + "b.ExecProcedureRequest\032\037.hbase.pb.ExecPr" + + "ocedureResponse\022V\n\017IsProcedureDone\022 .hba" + + "se.pb.IsProcedureDoneRequest\032!.hbase.pb." 
+ + "IsProcedureDoneResponse\022V\n\017ModifyNamespa", + "ce\022 .hbase.pb.ModifyNamespaceRequest\032!.h" + + "base.pb.ModifyNamespaceResponse\022V\n\017Creat" + + "eNamespace\022 .hbase.pb.CreateNamespaceReq" + + "uest\032!.hbase.pb.CreateNamespaceResponse\022" + + "V\n\017DeleteNamespace\022 .hbase.pb.DeleteName" + + "spaceRequest\032!.hbase.pb.DeleteNamespaceR" + + "esponse\022k\n\026GetNamespaceDescriptor\022\'.hbas" + + "e.pb.GetNamespaceDescriptorRequest\032(.hba" + + "se.pb.GetNamespaceDescriptorResponse\022q\n\030" + + "ListNamespaceDescriptors\022).hbase.pb.List", + "NamespaceDescriptorsRequest\032*.hbase.pb.L" + + "istNamespaceDescriptorsResponse\022\206\001\n\037List" + + "TableDescriptorsByNamespace\0220.hbase.pb.L" + + "istTableDescriptorsByNamespaceRequest\0321." + + "hbase.pb.ListTableDescriptorsByNamespace" + + "Response\022t\n\031ListTableNamesByNamespace\022*." + + "hbase.pb.ListTableNamesByNamespaceReques" + + "t\032+.hbase.pb.ListTableNamesByNamespaceRe" + + "sponse\022P\n\rGetTableState\022\036.hbase.pb.GetTa" + + "bleStateRequest\032\037.hbase.pb.GetTableState", + "Response\022A\n\010SetQuota\022\031.hbase.pb.SetQuota" + + "Request\032\032.hbase.pb.SetQuotaResponse\022x\n\037g" + + "etLastMajorCompactionTimestamp\022).hbase.p" + + "b.MajorCompactionTimestampRequest\032*.hbas" + + "e.pb.MajorCompactionTimestampResponse\022\212\001" + + "\n(getLastMajorCompactionTimestampForRegi" + + "on\0222.hbase.pb.MajorCompactionTimestampFo" + + "rRegionRequest\032*.hbase.pb.MajorCompactio" + + "nTimestampResponse\022_\n\022getProcedureResult" + + "\022#.hbase.pb.GetProcedureResultRequest\032$.", + "hbase.pb.GetProcedureResultResponse\022h\n\027g" + + "etSecurityCapabilities\022%.hbase.pb.Securi" + + "tyCapabilitiesRequest\032&.hbase.pb.Securit" + + "yCapabilitiesResponse\022S\n\016AbortProcedure\022" + + "\037.hbase.pb.AbortProcedureRequest\032 .hbase" + + ".pb.AbortProcedureResponse\022S\n\016ListProced" + + "ures\022\037.hbase.pb.ListProceduresRequest\032 ." 
+ + "hbase.pb.ListProceduresResponseBB\n*org.a" + + "pache.hadoop.hbase.protobuf.generatedB\014M" + + "asterProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -65341,7 +66204,7 @@ public final class MasterProtos { internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_descriptor, - new java.lang.String[] { "Enabled", "Synchronous", "SwitchTypes", }); + new java.lang.String[] { "Enabled", "Synchronous", "SwitchTypes", "SkipLock", }); internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_descriptor = getDescriptor().getMessageTypes().get(53); internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_fieldAccessorTable = new @@ -65360,320 +66223,332 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsSplitOrMergeEnabledResponse_descriptor, new java.lang.String[] { "Enabled", }); - internal_static_hbase_pb_NormalizeRequest_descriptor = + internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_descriptor = getDescriptor().getMessageTypes().get(56); + internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_descriptor, + new java.lang.String[] { }); + internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_descriptor = + getDescriptor().getMessageTypes().get(57); + internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_descriptor, + new java.lang.String[] { }); + internal_static_hbase_pb_NormalizeRequest_descriptor = + getDescriptor().getMessageTypes().get(58); internal_static_hbase_pb_NormalizeRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_NormalizeRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_NormalizeResponse_descriptor = - getDescriptor().getMessageTypes().get(57); + getDescriptor().getMessageTypes().get(59); internal_static_hbase_pb_NormalizeResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_NormalizeResponse_descriptor, new java.lang.String[] { "NormalizerRan", }); internal_static_hbase_pb_SetNormalizerRunningRequest_descriptor = - getDescriptor().getMessageTypes().get(58); + getDescriptor().getMessageTypes().get(60); internal_static_hbase_pb_SetNormalizerRunningRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetNormalizerRunningRequest_descriptor, new java.lang.String[] { "On", }); internal_static_hbase_pb_SetNormalizerRunningResponse_descriptor = - getDescriptor().getMessageTypes().get(59); + getDescriptor().getMessageTypes().get(61); internal_static_hbase_pb_SetNormalizerRunningResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetNormalizerRunningResponse_descriptor, new java.lang.String[] { "PrevNormalizerValue", }); internal_static_hbase_pb_IsNormalizerEnabledRequest_descriptor = - 
getDescriptor().getMessageTypes().get(60); + getDescriptor().getMessageTypes().get(62); internal_static_hbase_pb_IsNormalizerEnabledRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsNormalizerEnabledRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsNormalizerEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(61); + getDescriptor().getMessageTypes().get(63); internal_static_hbase_pb_IsNormalizerEnabledResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsNormalizerEnabledResponse_descriptor, new java.lang.String[] { "Enabled", }); internal_static_hbase_pb_RunCatalogScanRequest_descriptor = - getDescriptor().getMessageTypes().get(62); + getDescriptor().getMessageTypes().get(64); internal_static_hbase_pb_RunCatalogScanRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RunCatalogScanRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_RunCatalogScanResponse_descriptor = - getDescriptor().getMessageTypes().get(63); + getDescriptor().getMessageTypes().get(65); internal_static_hbase_pb_RunCatalogScanResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RunCatalogScanResponse_descriptor, new java.lang.String[] { "ScanResult", }); internal_static_hbase_pb_EnableCatalogJanitorRequest_descriptor = - getDescriptor().getMessageTypes().get(64); + getDescriptor().getMessageTypes().get(66); internal_static_hbase_pb_EnableCatalogJanitorRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_EnableCatalogJanitorRequest_descriptor, new java.lang.String[] { "Enable", }); internal_static_hbase_pb_EnableCatalogJanitorResponse_descriptor = - getDescriptor().getMessageTypes().get(65); + getDescriptor().getMessageTypes().get(67); internal_static_hbase_pb_EnableCatalogJanitorResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_EnableCatalogJanitorResponse_descriptor, new java.lang.String[] { "PrevValue", }); internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(66); + getDescriptor().getMessageTypes().get(68); internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(67); + getDescriptor().getMessageTypes().get(69); internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_descriptor, new java.lang.String[] { "Value", }); internal_static_hbase_pb_SnapshotRequest_descriptor = - getDescriptor().getMessageTypes().get(68); + getDescriptor().getMessageTypes().get(70); internal_static_hbase_pb_SnapshotRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SnapshotRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_hbase_pb_SnapshotResponse_descriptor = - getDescriptor().getMessageTypes().get(69); + 
getDescriptor().getMessageTypes().get(71); internal_static_hbase_pb_SnapshotResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SnapshotResponse_descriptor, new java.lang.String[] { "ExpectedTimeout", }); internal_static_hbase_pb_GetCompletedSnapshotsRequest_descriptor = - getDescriptor().getMessageTypes().get(70); + getDescriptor().getMessageTypes().get(72); internal_static_hbase_pb_GetCompletedSnapshotsRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetCompletedSnapshotsRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetCompletedSnapshotsResponse_descriptor = - getDescriptor().getMessageTypes().get(71); + getDescriptor().getMessageTypes().get(73); internal_static_hbase_pb_GetCompletedSnapshotsResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetCompletedSnapshotsResponse_descriptor, new java.lang.String[] { "Snapshots", }); internal_static_hbase_pb_DeleteSnapshotRequest_descriptor = - getDescriptor().getMessageTypes().get(72); + getDescriptor().getMessageTypes().get(74); internal_static_hbase_pb_DeleteSnapshotRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_DeleteSnapshotRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_hbase_pb_DeleteSnapshotResponse_descriptor = - getDescriptor().getMessageTypes().get(73); + getDescriptor().getMessageTypes().get(75); internal_static_hbase_pb_DeleteSnapshotResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_DeleteSnapshotResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_RestoreSnapshotRequest_descriptor = - getDescriptor().getMessageTypes().get(74); + getDescriptor().getMessageTypes().get(76); internal_static_hbase_pb_RestoreSnapshotRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RestoreSnapshotRequest_descriptor, new java.lang.String[] { "Snapshot", "NonceGroup", "Nonce", }); internal_static_hbase_pb_RestoreSnapshotResponse_descriptor = - getDescriptor().getMessageTypes().get(75); + getDescriptor().getMessageTypes().get(77); internal_static_hbase_pb_RestoreSnapshotResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RestoreSnapshotResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_IsSnapshotDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(76); + getDescriptor().getMessageTypes().get(78); internal_static_hbase_pb_IsSnapshotDoneRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsSnapshotDoneRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_hbase_pb_IsSnapshotDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(77); + getDescriptor().getMessageTypes().get(79); internal_static_hbase_pb_IsSnapshotDoneResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsSnapshotDoneResponse_descriptor, new java.lang.String[] { "Done", "Snapshot", }); internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(78); + getDescriptor().getMessageTypes().get(80); 
internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(79); + getDescriptor().getMessageTypes().get(81); internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_descriptor, new java.lang.String[] { "Done", }); internal_static_hbase_pb_GetSchemaAlterStatusRequest_descriptor = - getDescriptor().getMessageTypes().get(80); + getDescriptor().getMessageTypes().get(82); internal_static_hbase_pb_GetSchemaAlterStatusRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetSchemaAlterStatusRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_hbase_pb_GetSchemaAlterStatusResponse_descriptor = - getDescriptor().getMessageTypes().get(81); + getDescriptor().getMessageTypes().get(83); internal_static_hbase_pb_GetSchemaAlterStatusResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetSchemaAlterStatusResponse_descriptor, new java.lang.String[] { "YetToUpdateRegions", "TotalRegions", }); internal_static_hbase_pb_GetTableDescriptorsRequest_descriptor = - getDescriptor().getMessageTypes().get(82); + getDescriptor().getMessageTypes().get(84); internal_static_hbase_pb_GetTableDescriptorsRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetTableDescriptorsRequest_descriptor, new java.lang.String[] { "TableNames", "Regex", "IncludeSysTables", "Namespace", }); internal_static_hbase_pb_GetTableDescriptorsResponse_descriptor = - getDescriptor().getMessageTypes().get(83); + getDescriptor().getMessageTypes().get(85); internal_static_hbase_pb_GetTableDescriptorsResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetTableDescriptorsResponse_descriptor, new java.lang.String[] { "TableSchema", }); internal_static_hbase_pb_GetTableNamesRequest_descriptor = - getDescriptor().getMessageTypes().get(84); + getDescriptor().getMessageTypes().get(86); internal_static_hbase_pb_GetTableNamesRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetTableNamesRequest_descriptor, new java.lang.String[] { "Regex", "IncludeSysTables", "Namespace", }); internal_static_hbase_pb_GetTableNamesResponse_descriptor = - getDescriptor().getMessageTypes().get(85); + getDescriptor().getMessageTypes().get(87); internal_static_hbase_pb_GetTableNamesResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetTableNamesResponse_descriptor, new java.lang.String[] { "TableNames", }); internal_static_hbase_pb_GetTableStateRequest_descriptor = - getDescriptor().getMessageTypes().get(86); + getDescriptor().getMessageTypes().get(88); internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetTableStateRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_hbase_pb_GetTableStateResponse_descriptor = - 
getDescriptor().getMessageTypes().get(87); + getDescriptor().getMessageTypes().get(89); internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetTableStateResponse_descriptor, new java.lang.String[] { "TableState", }); internal_static_hbase_pb_GetClusterStatusRequest_descriptor = - getDescriptor().getMessageTypes().get(88); + getDescriptor().getMessageTypes().get(90); internal_static_hbase_pb_GetClusterStatusRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetClusterStatusRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetClusterStatusResponse_descriptor = - getDescriptor().getMessageTypes().get(89); + getDescriptor().getMessageTypes().get(91); internal_static_hbase_pb_GetClusterStatusResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetClusterStatusResponse_descriptor, new java.lang.String[] { "ClusterStatus", }); internal_static_hbase_pb_IsMasterRunningRequest_descriptor = - getDescriptor().getMessageTypes().get(90); + getDescriptor().getMessageTypes().get(92); internal_static_hbase_pb_IsMasterRunningRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsMasterRunningRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsMasterRunningResponse_descriptor = - getDescriptor().getMessageTypes().get(91); + getDescriptor().getMessageTypes().get(93); internal_static_hbase_pb_IsMasterRunningResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsMasterRunningResponse_descriptor, new java.lang.String[] { "IsMasterRunning", }); internal_static_hbase_pb_ExecProcedureRequest_descriptor = - getDescriptor().getMessageTypes().get(92); + getDescriptor().getMessageTypes().get(94); internal_static_hbase_pb_ExecProcedureRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ExecProcedureRequest_descriptor, new java.lang.String[] { "Procedure", }); internal_static_hbase_pb_ExecProcedureResponse_descriptor = - getDescriptor().getMessageTypes().get(93); + getDescriptor().getMessageTypes().get(95); internal_static_hbase_pb_ExecProcedureResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ExecProcedureResponse_descriptor, new java.lang.String[] { "ExpectedTimeout", "ReturnData", }); internal_static_hbase_pb_IsProcedureDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(94); + getDescriptor().getMessageTypes().get(96); internal_static_hbase_pb_IsProcedureDoneRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsProcedureDoneRequest_descriptor, new java.lang.String[] { "Procedure", }); internal_static_hbase_pb_IsProcedureDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(95); + getDescriptor().getMessageTypes().get(97); internal_static_hbase_pb_IsProcedureDoneResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsProcedureDoneResponse_descriptor, new java.lang.String[] { "Done", "Snapshot", }); internal_static_hbase_pb_GetProcedureResultRequest_descriptor = - getDescriptor().getMessageTypes().get(96); + getDescriptor().getMessageTypes().get(98); 
internal_static_hbase_pb_GetProcedureResultRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetProcedureResultRequest_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_GetProcedureResultResponse_descriptor = - getDescriptor().getMessageTypes().get(97); + getDescriptor().getMessageTypes().get(99); internal_static_hbase_pb_GetProcedureResultResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetProcedureResultResponse_descriptor, new java.lang.String[] { "State", "StartTime", "LastUpdate", "Result", "Exception", }); internal_static_hbase_pb_AbortProcedureRequest_descriptor = - getDescriptor().getMessageTypes().get(98); + getDescriptor().getMessageTypes().get(100); internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_AbortProcedureRequest_descriptor, new java.lang.String[] { "ProcId", "MayInterruptIfRunning", }); internal_static_hbase_pb_AbortProcedureResponse_descriptor = - getDescriptor().getMessageTypes().get(99); + getDescriptor().getMessageTypes().get(101); internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_AbortProcedureResponse_descriptor, new java.lang.String[] { "IsProcedureAborted", }); internal_static_hbase_pb_ListProceduresRequest_descriptor = - getDescriptor().getMessageTypes().get(100); + getDescriptor().getMessageTypes().get(102); internal_static_hbase_pb_ListProceduresRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ListProceduresRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_ListProceduresResponse_descriptor = - getDescriptor().getMessageTypes().get(101); + getDescriptor().getMessageTypes().get(103); internal_static_hbase_pb_ListProceduresResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ListProceduresResponse_descriptor, new java.lang.String[] { "Procedure", }); internal_static_hbase_pb_SetQuotaRequest_descriptor = - getDescriptor().getMessageTypes().get(102); + getDescriptor().getMessageTypes().get(104); internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetQuotaRequest_descriptor, new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", }); internal_static_hbase_pb_SetQuotaResponse_descriptor = - getDescriptor().getMessageTypes().get(103); + getDescriptor().getMessageTypes().get(105); internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetQuotaResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor = - getDescriptor().getMessageTypes().get(104); + getDescriptor().getMessageTypes().get(106); internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor = - 
getDescriptor().getMessageTypes().get(105); + getDescriptor().getMessageTypes().get(107); internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor, new java.lang.String[] { "Region", }); internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor = - getDescriptor().getMessageTypes().get(106); + getDescriptor().getMessageTypes().get(108); internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor, new java.lang.String[] { "CompactionTimestamp", }); internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor = - getDescriptor().getMessageTypes().get(107); + getDescriptor().getMessageTypes().get(109); internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor = - getDescriptor().getMessageTypes().get(108); + getDescriptor().getMessageTypes().get(110); internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor, diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java index f64d0c1b412..a45c42194cf 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java @@ -9733,6 +9733,540 @@ public final class ZooKeeperProtos { // @@protoc_insertion_point(class_scope:hbase.pb.SwitchState) } + public interface SplitAndMergeStateOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional bool split_enabled = 1; + /** + * optional bool split_enabled = 1; + */ + boolean hasSplitEnabled(); + /** + * optional bool split_enabled = 1; + */ + boolean getSplitEnabled(); + + // optional bool merge_enabled = 2; + /** + * optional bool merge_enabled = 2; + */ + boolean hasMergeEnabled(); + /** + * optional bool merge_enabled = 2; + */ + boolean getMergeEnabled(); + } + /** + * Protobuf type {@code hbase.pb.SplitAndMergeState} + * + *
+   **
+   * State for split and merge, used in hbck
+   * </pre>
+ */ + public static final class SplitAndMergeState extends + com.google.protobuf.GeneratedMessage + implements SplitAndMergeStateOrBuilder { + // Use SplitAndMergeState.newBuilder() to construct. + private SplitAndMergeState(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SplitAndMergeState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SplitAndMergeState defaultInstance; + public static SplitAndMergeState getDefaultInstance() { + return defaultInstance; + } + + public SplitAndMergeState getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SplitAndMergeState( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + splitEnabled_ = input.readBool(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + mergeEnabled_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SplitAndMergeState parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SplitAndMergeState(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional bool split_enabled = 1; + public static final int SPLIT_ENABLED_FIELD_NUMBER = 1; + private boolean splitEnabled_; + /** + * optional bool split_enabled = 1; + */ + public boolean hasSplitEnabled() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional bool 
split_enabled = 1; + */ + public boolean getSplitEnabled() { + return splitEnabled_; + } + + // optional bool merge_enabled = 2; + public static final int MERGE_ENABLED_FIELD_NUMBER = 2; + private boolean mergeEnabled_; + /** + * optional bool merge_enabled = 2; + */ + public boolean hasMergeEnabled() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bool merge_enabled = 2; + */ + public boolean getMergeEnabled() { + return mergeEnabled_; + } + + private void initFields() { + splitEnabled_ = false; + mergeEnabled_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, splitEnabled_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBool(2, mergeEnabled_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, splitEnabled_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(2, mergeEnabled_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState) obj; + + boolean result = true; + result = result && (hasSplitEnabled() == other.hasSplitEnabled()); + if (hasSplitEnabled()) { + result = result && (getSplitEnabled() + == other.getSplitEnabled()); + } + result = result && (hasMergeEnabled() == other.hasMergeEnabled()); + if (hasMergeEnabled()) { + result = result && (getMergeEnabled() + == other.getMergeEnabled()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasSplitEnabled()) { + hash = (37 * hash) + SPLIT_ENABLED_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getSplitEnabled()); + } + if (hasMergeEnabled()) { + hash = (37 * hash) + MERGE_ENABLED_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getMergeEnabled()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.SplitAndMergeState} + * + *
+     **
+     * State for split and merge, used in hbck
+     * 
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeStateOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + splitEnabled_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + mergeEnabled_ = false; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState build() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.splitEnabled_ = splitEnabled_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.mergeEnabled_ = mergeEnabled_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState other) { + if (other 
== org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.getDefaultInstance()) return this; + if (other.hasSplitEnabled()) { + setSplitEnabled(other.getSplitEnabled()); + } + if (other.hasMergeEnabled()) { + setMergeEnabled(other.getMergeEnabled()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional bool split_enabled = 1; + private boolean splitEnabled_ ; + /** + * optional bool split_enabled = 1; + */ + public boolean hasSplitEnabled() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional bool split_enabled = 1; + */ + public boolean getSplitEnabled() { + return splitEnabled_; + } + /** + * optional bool split_enabled = 1; + */ + public Builder setSplitEnabled(boolean value) { + bitField0_ |= 0x00000001; + splitEnabled_ = value; + onChanged(); + return this; + } + /** + * optional bool split_enabled = 1; + */ + public Builder clearSplitEnabled() { + bitField0_ = (bitField0_ & ~0x00000001); + splitEnabled_ = false; + onChanged(); + return this; + } + + // optional bool merge_enabled = 2; + private boolean mergeEnabled_ ; + /** + * optional bool merge_enabled = 2; + */ + public boolean hasMergeEnabled() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bool merge_enabled = 2; + */ + public boolean getMergeEnabled() { + return mergeEnabled_; + } + /** + * optional bool merge_enabled = 2; + */ + public Builder setMergeEnabled(boolean value) { + bitField0_ |= 0x00000002; + mergeEnabled_ = value; + onChanged(); + return this; + } + /** + * optional bool merge_enabled = 2; + */ + public Builder clearMergeEnabled() { + bitField0_ = (bitField0_ & ~0x00000002); + mergeEnabled_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.SplitAndMergeState) + } + + static { + defaultInstance = new SplitAndMergeState(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.SplitAndMergeState) + } + private static com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_MetaRegionServer_descriptor; private static @@ -9793,6 +10327,11 @@ public final class ZooKeeperProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_SwitchState_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_SplitAndMergeState_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -9837,9 +10376,10 @@ public final class ZooKeeperProtos { "ner\030\002 \001(\0132\024.hbase.pb.ServerName\022\021\n\tthrea" + "d_id\030\003 
\001(\003\022\021\n\tis_shared\030\004 \001(\010\022\017\n\007purpose" + "\030\005 \001(\t\022\023\n\013create_time\030\006 \001(\003\"\036\n\013SwitchSta" + - "te\022\017\n\007enabled\030\001 \001(\010BE\n*org.apache.hadoop" + - ".hbase.protobuf.generatedB\017ZooKeeperProt" + - "osH\001\210\001\001\240\001\001" + "te\022\017\n\007enabled\030\001 \001(\010\"B\n\022SplitAndMergeStat" + + "e\022\025\n\rsplit_enabled\030\001 \001(\010\022\025\n\rmerge_enable" + + "d\030\002 \001(\010BE\n*org.apache.hadoop.hbase.proto" + + "buf.generatedB\017ZooKeeperProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -9918,6 +10458,12 @@ public final class ZooKeeperProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SwitchState_descriptor, new java.lang.String[] { "Enabled", }); + internal_static_hbase_pb_SplitAndMergeState_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_SplitAndMergeState_descriptor, + new java.lang.String[] { "SplitEnabled", "MergeEnabled", }); return null; } }; diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto index 1f7a3b7e7bf..ad8111e658d 100644 --- a/hbase-protocol/src/main/protobuf/Master.proto +++ b/hbase-protocol/src/main/protobuf/Master.proto @@ -288,6 +288,7 @@ message SetSplitOrMergeEnabledRequest { required bool enabled = 1; optional bool synchronous = 2; repeated MasterSwitchType switch_types = 3; + optional bool skip_lock = 4; } message SetSplitOrMergeEnabledResponse { @@ -302,6 +303,12 @@ message IsSplitOrMergeEnabledResponse { required bool enabled = 1; } +message ReleaseSplitOrMergeLockAndRollbackRequest { +} + +message ReleaseSplitOrMergeLockAndRollbackResponse { +} + message NormalizeRequest { } @@ -671,6 +678,12 @@ service MasterService { rpc IsSplitOrMergeEnabled(IsSplitOrMergeEnabledRequest) returns(IsSplitOrMergeEnabledResponse); + /** + * Release lock and rollback state. + */ + rpc ReleaseSplitOrMergeLockAndRollback(ReleaseSplitOrMergeLockAndRollbackRequest) + returns(ReleaseSplitOrMergeLockAndRollbackResponse); + /** * Run region normalizer. Can NOT run for various reasons. Check logs. 
*/ diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto index 186d183f4da..41c0e0eebf9 100644 --- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto +++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto @@ -165,4 +165,12 @@ message TableLock { */ message SwitchState { optional bool enabled = 1; -} \ No newline at end of file +} + +/** + * State for split and merge, used in hbck + */ +message SplitAndMergeState { + optional bool split_enabled = 1; + optional bool merge_enabled = 2; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 6ee022f8174..edfb3ce905b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.errorhandling.ForeignException; +import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.exceptions.MergeRegionException; import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; import org.apache.hadoop.hbase.ipc.PriorityFunction; @@ -1495,6 +1496,10 @@ public class MasterRpcServices extends RSRpcServices try { master.checkInitialized(); boolean newValue = request.getEnabled(); + boolean skipLock = request.getSkipLock(); + if (!master.getSplitOrMergeTracker().lock(skipLock)) { + throw new DoNotRetryIOException("cannot set the split/merge switch: the lock is held by another client"); + } for (MasterSwitchType masterSwitchType : request.getSwitchTypesList()) { Admin.MasterSwitchType switchType = convert(masterSwitchType); boolean oldValue = master.isSplitOrMergeEnabled(switchType); @@ -1526,6 +1531,24 @@ public class MasterRpcServices extends RSRpcServices return response.build(); } + @Override + public ReleaseSplitOrMergeLockAndRollbackResponse + releaseSplitOrMergeLockAndRollback(RpcController controller, + ReleaseSplitOrMergeLockAndRollbackRequest request) throws ServiceException { + try { + master.getSplitOrMergeTracker().releaseLockAndRollback(); + } catch (KeeperException e) { + throw new ServiceException(e); + } catch (DeserializationException e) { + throw new ServiceException(e); + } catch (InterruptedException e) { + throw new ServiceException(e); + } + ReleaseSplitOrMergeLockAndRollbackResponse.Builder builder = + ReleaseSplitOrMergeLockAndRollbackResponse.newBuilder(); + return builder.build(); + } + @Override public NormalizeResponse normalize(RpcController controller, NormalizeRequest request) throws ServiceException {
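The new SplitAndMergeState message above is what SplitOrMergeTracker (later in this patch) persists under the switch-lock znode, framed with HBase's PB magic prefix. A minimal round-trip sketch of that framing, using only calls that appear in this patch (the byte[] stands in for the znode data, so no cluster is needed; the class name is illustrative):

import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

public class SplitAndMergeStateRoundTrip {
  public static void main(String[] args) throws Exception {
    // Serialize: the same framing saveOriginalState() applies before writing the znode.
    byte[] data = ProtobufUtil.prependPBMagic(
        ZooKeeperProtos.SplitAndMergeState.newBuilder()
            .setSplitEnabled(true)
            .setMergeEnabled(false)
            .build().toByteArray());

    // Deserialize: the same steps rollback() performs on the bytes it reads back.
    ProtobufUtil.expectPBMagicPrefix(data);
    ZooKeeperProtos.SplitAndMergeState.Builder builder =
        ZooKeeperProtos.SplitAndMergeState.newBuilder();
    int magicLen = ProtobufUtil.lengthOfPBMagic();
    ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen);
    ZooKeeperProtos.SplitAndMergeState state = builder.build();
    System.out.println("split=" + state.getSplitEnabled() + " merge=" + state.getMergeEnabled());
  }
}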
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 9abef9c082a..5af063418c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -54,6 +54,7 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; @@ -307,6 +308,7 @@ public class HBaseFsck extends Configured implements Closeable { private Map<TableName, Set<String>> skippedRegions = new HashMap<>(); + ZooKeeperWatcher zkw = null; /** * Constructor * @ @@ -345,6 +347,7 @@ public class HBaseFsck extends Configured implements Closeable { "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL), getConf().getInt( "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME)); + zkw = createZooKeeperWatcher(); } private class FileLockCallable implements Callable<FSDataOutputStream> { @@ -686,7 +689,8 @@ public class HBaseFsck extends Configured implements Closeable { } boolean[] oldSplitAndMerge = null; if (shouldDisableSplitAndMerge()) { - oldSplitAndMerge = admin.setSplitOrMergeEnabled(false, false, + admin.releaseSplitOrMergeLockAndRollback(); + oldSplitAndMerge = admin.setSplitOrMergeEnabled(false, false, false, Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE); } @@ -703,14 +707,7 @@ if (shouldDisableSplitAndMerge()) { if (oldSplitAndMerge != null) { - if (oldSplitAndMerge[0] && oldSplitAndMerge[1]) { - admin.setSplitOrMergeEnabled(true, false, - Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE); - } else if (oldSplitAndMerge[0]) { - admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.SPLIT); - } else if (oldSplitAndMerge[1]) { - admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.MERGE); - } + admin.releaseSplitOrMergeLockAndRollback(); } } } @@ -749,6 +746,10 @@ } catch (Exception io) { LOG.warn(io); } finally { + if (zkw != null) { + zkw.close(); + zkw = null; + } IOUtils.closeQuietly(admin); IOUtils.closeQuietly(meta); IOUtils.closeQuietly(connection);
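The onlineHbck() changes above establish the calling pattern the new lock is designed for: take the lock while disabling the switches, and call releaseSplitOrMergeLockAndRollback() on the restore path so the saved values come back even if individual repairs fail. A minimal sketch of the same pattern for any other maintenance client, assuming an open Connection conn (the method and variable names here are illustrative, not part of the patch):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class SplitMergeLockExample {
  // Disables splits and merges under the shared lock, runs the caller's
  // maintenance work, then restores the saved switch values and drops the lock.
  static void runWithSwitchesDisabled(Connection conn, Runnable work) throws IOException {
    Admin admin = conn.getAdmin();
    try {
      // skipLock=false: create the lock node and persist the current switch values.
      admin.setSplitOrMergeEnabled(false, false, false,
          Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
      work.run();
    } finally {
      // Rolls the switches back to the saved values and deletes the lock node.
      admin.releaseSplitOrMergeLockAndRollback();
      admin.close();
    }
  }
}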
@@ -1789,14 +1790,7 @@ private ServerName getMetaRegionServerName(int replicaId) throws IOException, KeeperException { - ZooKeeperWatcher zkw = createZooKeeperWatcher(); - ServerName sn = null; - try { - sn = new MetaTableLocator().getMetaRegionLocation(zkw, replicaId); - } finally { - zkw.close(); - } - return sn; + return new MetaTableLocator().getMetaRegionLocation(zkw, replicaId); } /** @@ -3281,28 +3275,21 @@ } private void checkAndFixTableLocks() throws IOException { - ZooKeeperWatcher zkw = createZooKeeperWatcher(); TableLockChecker checker = new TableLockChecker(zkw, errors); checker.checkTableLocks(); if (this.fixTableLocks) { checker.fixExpiredTableLocks(); } - zkw.close(); } private void checkAndFixReplication() throws IOException { - ZooKeeperWatcher zkw = createZooKeeperWatcher(); - try { - ReplicationChecker checker = new ReplicationChecker(getConf(), zkw, connection, errors); - checker.checkUnDeletedQueues(); + ReplicationChecker checker = new ReplicationChecker(getConf(), zkw, connection, errors); + checker.checkUnDeletedQueues(); - if (checker.hasUnDeletedQueues() && this.fixReplication) { - checker.fixUnDeletedQueues(); - setShouldRerun(); - } - } finally { - zkw.close(); + if (checker.hasUnDeletedQueues() && this.fixReplication) { + checker.fixUnDeletedQueues(); + setShouldRerun(); } } @@ -3372,12 +3359,7 @@ private void unassignMetaReplica(HbckInfo hi) throws IOException, InterruptedException, KeeperException { undeployRegions(hi); - ZooKeeperWatcher zkw = createZooKeeperWatcher(); - try { - ZKUtil.deleteNode(zkw, zkw.getZNodeForReplica(hi.metaEntry.getReplicaId())); - } finally { - zkw.close(); - } + ZKUtil.deleteNode(zkw, zkw.getZNodeForReplica(hi.metaEntry.getReplicaId())); } private void assignMetaReplica(int replicaId) @@ -4206,7 +4188,12 @@ public class HBaseFsck extends Configured implements Closeable { * Disable the split and merge */ public static void setDisableSplitAndMerge() { - disableSplitAndMerge = true; + setDisableSplitAndMerge(true); + } + + @VisibleForTesting + public static void setDisableSplitAndMerge(boolean flag) { + disableSplitAndMerge = flag; } /** @@ -4226,7 +4213,7 @@ public class HBaseFsck extends Configured implements Closeable { public boolean shouldDisableSplitAndMerge() { return fixAny || disableSplitAndMerge; } - + /** * Set summary mode. * Print only summary of the tables and status (OK or INCONSISTENT) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java index 0d729a131e6..e548245ed25 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.zookeeper; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; @@ -25,6 +27,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState; import org.apache.hadoop.hbase.util.Bytes; import org.apache.zookeeper.KeeperException; @@ -37,8 +40,13 @@ import org.apache.zookeeper.KeeperException; @InterfaceAudience.Private public class SplitOrMergeTracker { + public static final String LOCK = "splitOrMergeLock"; + public static final String STATE = "splitOrMergeState"; + private String splitZnode; private String mergeZnode; + private String splitOrMergeLock; + private ZooKeeperWatcher watcher; private SwitchStateTracker splitStateTracker; private SwitchStateTracker mergeStateTracker; @@ -49,6 +57,9 @@ public class SplitOrMergeTracker { if (ZKUtil.checkExists(watcher, watcher.getSwitchZNode()) < 0) { ZKUtil.createAndFailSilent(watcher, watcher.getSwitchZNode()); } + if (ZKUtil.checkExists(watcher, watcher.getSwitchLockZNode()) < 0) { + ZKUtil.createAndFailSilent(watcher, watcher.getSwitchLockZNode()); + } } catch (KeeperException e) { throw new RuntimeException(e); } @@ -56,8 +67,12 @@ public class SplitOrMergeTracker { conf.get("zookeeper.znode.switch.split", "split")); mergeZnode = ZKUtil.joinZNode(watcher.getSwitchZNode(), conf.get("zookeeper.znode.switch.merge", "merge")); + + splitOrMergeLock = ZKUtil.joinZNode(watcher.getSwitchLockZNode(), LOCK); + splitStateTracker = new SwitchStateTracker(watcher, splitZnode, abortable); mergeStateTracker = new SwitchStateTracker(watcher, mergeZnode, abortable); + this.watcher = watcher; } public void start() { @@ -91,6 +106,76 @@ public class SplitOrMergeTracker { } } + /** + * Roll back to the original switch state and delete the lock node.
+ */ + public void releaseLockAndRollback() + throws KeeperException, DeserializationException, InterruptedException { + if (ZKUtil.checkExists(watcher, splitOrMergeLock) != -1) { + List<ZKUtil.ZKUtilOp> ops = new ArrayList<>(); + rollback(ops); + ops.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(splitOrMergeLock)); + ZKUtil.multiOrSequential(watcher, ops, false); + } + } + + // If a saved switch state exists on zk, restore it before the lock is dropped + private void rollback(List<ZKUtil.ZKUtilOp> ops) throws KeeperException, InterruptedException, DeserializationException { + String splitOrMergeState = ZKUtil.joinZNode(watcher.getSwitchLockZNode(), + SplitOrMergeTracker.STATE); + if (ZKUtil.checkExists(watcher, splitOrMergeState) != -1) { + byte[] bytes = ZKUtil.getData(watcher, splitOrMergeState); + ProtobufUtil.expectPBMagicPrefix(bytes); + ZooKeeperProtos.SplitAndMergeState.Builder builder = + ZooKeeperProtos.SplitAndMergeState.newBuilder(); + try { + int magicLen = ProtobufUtil.lengthOfPBMagic(); + ProtobufUtil.mergeFrom(builder, bytes, magicLen, bytes.length - magicLen); + } catch (IOException e) { + throw new DeserializationException(e); + } + ZooKeeperProtos.SplitAndMergeState splitAndMergeState = builder.build(); + splitStateTracker.setSwitchEnabled(splitAndMergeState.getSplitEnabled()); + mergeStateTracker.setSwitchEnabled(splitAndMergeState.getMergeEnabled()); + ops.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(splitOrMergeState)); + } + } + + /** + * Acquire the split/merge lock if no other client holds it. + * After the lock node is created on zk, the original split/merge switch values + * are saved there so that {@link #releaseLockAndRollback()} can restore them. + * @param skipLock if true, skip creating the lock node, but still fail + * if another client already holds the lock + * @return true if the lock was acquired (or skipLock is set and no lock exists), + * false otherwise + */ + public boolean lock(boolean skipLock) throws KeeperException { + if (ZKUtil.checkExists(watcher, splitOrMergeLock) != -1) { + return false; + } + if (skipLock) { + return true; + } + ZKUtil.createAndFailSilent(watcher, splitOrMergeLock); + if (ZKUtil.checkExists(watcher, splitOrMergeLock) != -1) { + saveOriginalState(); + return true; + } + return false; + } + + private void saveOriginalState() throws KeeperException { + boolean splitEnabled = isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT); + boolean mergeEnabled = isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE); + String splitOrMergeState = ZKUtil.joinZNode(watcher.getSwitchLockZNode(), + SplitOrMergeTracker.STATE); + ZooKeeperProtos.SplitAndMergeState.Builder builder + = ZooKeeperProtos.SplitAndMergeState.newBuilder(); + builder.setSplitEnabled(splitEnabled); + builder.setMergeEnabled(mergeEnabled); + ZKUtil.createSetData(watcher, splitOrMergeState, + ProtobufUtil.prependPBMagic(builder.build().toByteArray())); + } + private static class SwitchStateTracker extends ZooKeeperNodeTracker { public SwitchStateTracker(ZooKeeperWatcher watcher, String node, Abortable abortable) {
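One subtlety in rollback() above: on a protobuf optional field, has*() only reports whether the field is present, while get*() returns its value. Because saveOriginalState() always sets both fields, the restore has to go through the get*() accessors; reading has*() instead would re-enable a switch whose saved value was false. A small self-contained demonstration:

import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

public class HasVersusGet {
  public static void main(String[] args) {
    ZooKeeperProtos.SplitAndMergeState saved =
        ZooKeeperProtos.SplitAndMergeState.newBuilder()
            .setSplitEnabled(false) // split was disabled when the lock was taken
            .setMergeEnabled(true)
            .build();
    System.out.println(saved.hasSplitEnabled()); // true  (the field is set)
    System.out.println(saved.getSplitEnabled()); // false (the saved value)
  }
}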
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java index 6405a14949c..e1ce63bdb5b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java @@ -41,6 +41,7 @@ import java.util.List; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; @Category({MediumTests.class, ClientTests.class}) public class TestSplitOrMergeStatus { @@ -76,14 +77,15 @@ public class TestSplitOrMergeStatus { Admin admin = TEST_UTIL.getAdmin(); initSwitchStatus(admin); - boolean[] results = admin.setSplitOrMergeEnabled(false, false, Admin.MasterSwitchType.SPLIT); + boolean[] results = admin.setSplitOrMergeEnabled(false, false, + true, Admin.MasterSwitchType.SPLIT); assertEquals(results.length, 1); assertTrue(results[0]); admin.split(t.getName()); int count = waitOnSplitOrMerge(t).size(); assertTrue(orignalCount == count); - results = admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.SPLIT); + results = admin.setSplitOrMergeEnabled(true, false, true, Admin.MasterSwitchType.SPLIT); assertEquals(results.length, 1); assertFalse(results[0]); admin.split(t.getName()); @@ -108,7 +110,8 @@ public class TestSplitOrMergeStatus { waitForMergable(admin, name); int orignalCount = locator.getAllRegionLocations().size(); - boolean[] results = admin.setSplitOrMergeEnabled(false, false, Admin.MasterSwitchType.MERGE); + boolean[] results = admin.setSplitOrMergeEnabled(false, false, + true, Admin.MasterSwitchType.MERGE); assertEquals(results.length, 1); assertTrue(results[0]); List<HRegionInfo> regions = admin.getTableRegions(t.getName()); @@ -119,7 +122,7 @@ assertTrue(orignalCount == count); waitForMergable(admin, name); - results = admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.MERGE); + results = admin.setSplitOrMergeEnabled(true, false, true, Admin.MasterSwitchType.MERGE); assertEquals(results.length, 1); assertFalse(results[0]); admin.mergeRegions(regions.get(0).getEncodedNameAsBytes(), @@ -132,7 +135,7 @@ @Test public void testMultiSwitches() throws IOException { Admin admin = TEST_UTIL.getAdmin(); - boolean[] switches = admin.setSplitOrMergeEnabled(false, false, + boolean[] switches = admin.setSplitOrMergeEnabled(false, false, true, Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE); for (boolean s : switches){ assertTrue(s); @@ -142,12 +145,34 @@ admin.close(); } + @Test + public void testSwitchLock() throws IOException { + Admin admin = TEST_UTIL.getAdmin(); + admin.setSplitOrMergeEnabled(false, false, false, + Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE); + try { + admin.setSplitOrMergeEnabled(false, false, true, + Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE); + fail(); + } catch (IOException e) { + LOG.info("", e); + } + admin.releaseSplitOrMergeLockAndRollback(); + try { + admin.setSplitOrMergeEnabled(true, false, true, + Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE); + } catch (IOException e) { + fail(); + } + admin.close(); + } + private void initSwitchStatus(Admin admin) throws IOException { if (!admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT)) { - admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.SPLIT); + admin.setSplitOrMergeEnabled(true, false, true, Admin.MasterSwitchType.SPLIT); } if (!admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE)) { - admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.MERGE); + admin.setSplitOrMergeEnabled(true, false, true, Admin.MasterSwitchType.MERGE); } assertTrue(admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT)); assertTrue(admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE)); }
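testSwitchLock above also illustrates the recovery path when a lock holder dies without cleaning up: any client can call releaseSplitOrMergeLockAndRollback() to delete the stale lock node and restore the saved switch values, after which locking calls succeed again. A sketch of that recovery step, assuming an open Connection conn (names are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class StaleLockRecovery {
  static void recover(Connection conn) throws IOException {
    // Admin extends Closeable, so try-with-resources closes it for us.
    try (Admin admin = conn.getAdmin()) {
      // Deletes the lock znode (if present) and rolls the split/merge
      // switches back to the values saved when the lock was taken.
      admin.releaseSplitOrMergeLockAndRollback();
    }
  }
}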
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java index b722feb1b4b..dbb23a56c91 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -61,6 +62,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.junit.AfterClass; import org.junit.Assert; @@ -69,6 +71,8 @@ import org.junit.BeforeClass; import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; import java.io.IOException; import java.util.ArrayList; @@ -89,6 +93,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.*; import static org.junit.Assert.*; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.spy; @Category({MiscTests.class, LargeTests.class}) public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { @@ -1843,4 +1849,53 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { doQuarantineTest(table, hbck, 3, 0, 0, 0, 1); hbck.close(); } + + /** + * See HBASE-15406 + */ + @Test + public void testSplitOrMergeStatWhenHBCKAbort() throws Exception { + admin.setSplitOrMergeEnabled(true, false, true, + Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE); + boolean oldSplit = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT); + boolean oldMerge = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE); + + assertTrue(oldSplit); + assertTrue(oldMerge); + + ExecutorService exec = new ScheduledThreadPoolExecutor(10); + HBaseFsck hbck = new HBaseFsck(conf, exec); + HBaseFsck.setDisplayFullReport(); // i.e. -details + final HBaseFsck spiedHbck = spy(hbck); + doAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + // clear hbck's disableSplitAndMerge flag here, so the finally block in onlineHbck() + // will not roll the switches back; this simulates an hbck run that aborts mid-repair
+ spiedHbck.setDisableSplitAndMerge(false); + return null; + } + }).when(spiedHbck).onlineConsistencyRepair(); + spiedHbck.setDisableSplitAndMerge(); + spiedHbck.connect(); + spiedHbck.onlineHbck(); + spiedHbck.close(); + + boolean split = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT); + boolean merge = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE); + assertFalse(split); + assertFalse(merge); + + // rerun hbck to repair the switches state + hbck = new HBaseFsck(conf, exec); + hbck.setDisableSplitAndMerge(); + hbck.connect(); + hbck.onlineHbck(); + hbck.close(); + + split = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT); + merge = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE); + + assertTrue(split); + assertTrue(merge); + } } diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index 40c3711aedb..b45a210129b 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -146,7 +146,7 @@ module Hbase end @admin.setSplitOrMergeEnabled( java.lang.Boolean.valueOf(enabled), java.lang.Boolean.valueOf(false), - switch_type)[0] + java.lang.Boolean.valueOf(true), switch_type)[0] end #----------------------------------------------------------------------------------------------