From e108a4f815ba3a8130f6613109d23a774ac50f16 Mon Sep 17 00:00:00 2001 From: Stephen Yuan Jiang Date: Thu, 27 Oct 2016 21:45:41 -0700 Subject: [PATCH] HBASE-14551 Procedure v2 - Reimplement split (Stephen Yuan Jiang) --- .../hadoop/hbase/client/HBaseAdmin.java | 1 - .../hbase/shaded/protobuf/ProtobufUtil.java | 46 + .../shaded/protobuf/RequestConverter.java | 16 +- .../shaded/protobuf/ResponseConverter.java | 13 + .../protobuf/generated/AdminProtos.java | 1450 +++++++++++-- .../generated/MasterProcedureProtos.java | 1884 ++++++++++++++++- .../generated/RegionServerStatusProtos.java | 1633 +++++++++++++- .../src/main/protobuf/Admin.proto | 15 + .../src/main/protobuf/MasterProcedure.proto | 21 + .../main/protobuf/RegionServerStatus.proto | 27 + .../hbase/rsgroup/RSGroupAdminEndpoint.java | 40 +- .../BaseMasterAndRegionObserver.java | 40 + .../hbase/coprocessor/BaseMasterObserver.java | 40 + .../hbase/coprocessor/MasterObserver.java | 71 + .../hbase/coprocessor/RegionObserver.java | 24 +- .../hbase/master/AssignmentManager.java | 36 +- .../apache/hadoop/hbase/master/HMaster.java | 25 + .../hbase/master/MasterCoprocessorHost.java | 109 + .../hbase/master/MasterRpcServices.java | 18 + .../hadoop/hbase/master/MasterServices.java | 15 + .../hadoop/hbase/master/RegionStates.java | 14 +- .../hadoop/hbase/master/ServerManager.java | 26 +- .../procedure/SplitTableRegionProcedure.java | 821 +++++++ .../hadoop/hbase/regionserver/HRegion.java | 2 +- .../hbase/regionserver/HRegionFileSystem.java | 11 +- .../hbase/regionserver/HRegionServer.java | 116 + .../hbase/regionserver/RSRpcServices.java | 29 + .../regionserver/RegionCoprocessorHost.java | 26 +- .../regionserver/RegionServerServices.java | 10 + .../hbase/regionserver/SplitRequest.java | 115 +- .../security/access/AccessController.java | 21 +- .../hbase/MockRegionServerServices.java | 10 + .../coprocessor/TestCoprocessorInterface.java | 14 +- .../hbase/coprocessor/TestMasterObserver.java | 39 + .../hbase/master/MockNoopMasterServices.java | 9 + .../hadoop/hbase/master/MockRegionServer.java | 23 +- .../TestSplitTableRegionProcedure.java | 480 +++++ .../hbase/namespace/TestNamespaceAuditor.java | 11 - .../TestEndToEndSplitTransaction.java | 86 - .../TestSplitTransactionOnCluster.java | 508 +---- .../security/access/TestAccessController.java | 22 +- .../access/TestWithDisabledAuthorization.java | 21 +- .../hadoop/hbase/util/TestHBaseFsckOneRS.java | 127 +- 43 files changed, 7043 insertions(+), 1022 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitTableRegionProcedure.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 51d07e3be2a..f4008fd624a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -165,7 +165,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTa import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 53101de8cf6..94efa37efce 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -104,6 +104,8 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat; import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; @@ -1719,6 +1721,33 @@ public final class ProtobufUtil { } } + /** + * A helper to close a region for split + * using admin protocol. + * + * @param controller RPC controller + * @param admin Admin service + * @param server the RS that hosts the target region + * @param parentRegionInfo the target region info + * @return true if the region is closed + * @throws IOException + */ + public static boolean closeRegionForSplit( + final RpcController controller, + final AdminService.BlockingInterface admin, + final ServerName server, + final HRegionInfo parentRegionInfo) throws IOException { + CloseRegionForSplitRequest closeRegionForSplitRequest = + ProtobufUtil.buildCloseRegionForSplitRequest(server, parentRegionInfo); + try { + CloseRegionForSplitResponse response = + admin.closeRegionForSplit(controller, closeRegionForSplitRequest); + return ResponseConverter.isClosed(response); + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + /** * A helper to warmup a region given a region name * using admin protocol @@ -3062,6 +3091,23 @@ public final class ProtobufUtil { return builder.build(); } + /** + * Create a CloseRegionForSplitRequest for a given region + * + * @param server the RS server that hosts the region + * @param parentRegionInfo the info of the region to close + * @return a CloseRegionForSplitRequest + */ + public static CloseRegionForSplitRequest buildCloseRegionForSplitRequest( + final ServerName server, + final HRegionInfo parentRegionInfo) { + CloseRegionForSplitRequest.Builder builder = CloseRegionForSplitRequest.newBuilder(); + RegionSpecifier parentRegion = RequestConverter.buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, parentRegionInfo.getRegionName()); + builder.setRegion(parentRegion); + return builder.build(); + } + /** * Create a CloseRegionRequest for a given encoded region name * diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index 7da3727e0eb..abd1563ac23 100644 --- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -106,6 +106,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOr import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; @@ -850,7 +851,7 @@ public final class RequestConverter { return ubuilder.build(); } - /** + /** * Create a WarmupRegionRequest for a given region name * * @param regionInfo Region we are warming up @@ -1061,6 +1062,19 @@ public final class RequestConverter { return builder.build(); } + public static SplitTableRegionRequest buildSplitTableRegionRequest( + final HRegionInfo regionInfo, + final byte[] splitPoint, + final long nonceGroup, + final long nonce) { + SplitTableRegionRequest.Builder builder = SplitTableRegionRequest.newBuilder(); + builder.setRegionInfo(HRegionInfo.convert(regionInfo)); + builder.setSplitRow(UnsafeByteOperations.unsafeWrap(splitPoint)); + builder.setNonceGroup(nonceGroup); + builder.setNonce(nonce); + return builder.build(); + } + /** * Create a protocol buffer AssignRegionRequest * diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java index dc7b95d2748..11fc9312f8c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.SingleResponse; import org.apache.hadoop.hbase.ipc.ServerRpcController; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse; @@ -251,6 +252,18 @@ public final class ResponseConverter { return proto.getClosed(); } + /** + * Check if the region is closed from a CloseRegionForSplitResponse + * + * @param proto the CloseRegionForSplitResponse + * @return the region close state + */ + public static boolean isClosed + (final CloseRegionForSplitResponse proto) { + if (proto == null || !proto.hasClosed()) return false; + return proto.getClosed(); + } + /** * A utility to build a GetServerInfoResponse. 
* diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java index 20020d452b7..b4e46b03840 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java @@ -9414,6 +9414,1084 @@ public final class AdminProtos { } + public interface CloseRegionForSplitRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.CloseRegionForSplitRequest) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + boolean hasRegion(); + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(); + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder(); + } + /** + *
+   **
+   * Closes the specified region and creates
+   * the child regions.
+   * 
+ * + * Protobuf type {@code hbase.pb.CloseRegionForSplitRequest} + */ + public static final class CloseRegionForSplitRequest extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.CloseRegionForSplitRequest) + CloseRegionForSplitRequestOrBuilder { + // Use CloseRegionForSplitRequest.newBuilder() to construct. + private CloseRegionForSplitRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private CloseRegionForSplitRequest() { + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private CloseRegionForSplitRequest( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = region_.toBuilder(); + } + region_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(region_); + region_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest.Builder.class); + } + + private int bitField0_; + public static final int REGION_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier region_; + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required 
.hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { + return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : region_; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { + return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : region_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (!hasRegion()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegion().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, getRegion()); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getRegion()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest) obj; + + boolean result = true; + result = result && (hasRegion() == other.hasRegion()); + if (hasRegion()) { + result = result && getRegion() + .equals(other.getRegion()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegion()) { + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+     **
+     * Closes the specified region and creates
+     * the child regions.
+     * 
+ * + * Protobuf type {@code hbase.pb.CloseRegionForSplitRequest} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.CloseRegionForSplitRequest) + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequestOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getRegionFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + if (regionBuilder_ == null) { + region_ = null; + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitRequest_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (regionBuilder_ == null) { + result.region_ = region_; + } else { + result.region_ = regionBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest.getDefaultInstance()) return this; + if (other.hasRegion()) { + mergeRegion(other.getRegion()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + if (!hasRegion()) { + return false; + } + if (!getRegion().isInitialized()) { + return false; + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier region_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_; + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { + if (regionBuilder_ == null) { + return region_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : region_; + } else { + return regionBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder setRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + region_ = value; + onChanged(); + } else { + regionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder setRegion( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + if (regionBuilder_ == null) { + region_ = builderForValue.build(); + onChanged(); + } else { + regionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder mergeRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + region_ != null && + region_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) { + region_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(region_).mergeFrom(value).buildPartial(); + } else { + region_ = value; + } + onChanged(); + } else { + regionBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder clearRegion() { + if (regionBuilder_ == null) { + region_ = null; + onChanged(); + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegionFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { + if (regionBuilder_ != null) { + return regionBuilder_.getMessageOrBuilder(); + } else { + return region_ == null ? 
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : region_; + } + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> + getRegionFieldBuilder() { + if (regionBuilder_ == null) { + regionBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( + getRegion(), + getParentForChildren(), + isClean()); + region_ = null; + } + return regionBuilder_; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.CloseRegionForSplitRequest) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.CloseRegionForSplitRequest) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public CloseRegionForSplitRequest parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new CloseRegionForSplitRequest(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface CloseRegionForSplitResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.CloseRegionForSplitResponse) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * required bool closed = 1; + */ + boolean hasClosed(); + /** + * required bool closed = 1; + */ + boolean getClosed(); + } + /** + * Protobuf type {@code hbase.pb.CloseRegionForSplitResponse} + */ + public static final class CloseRegionForSplitResponse extends + 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.CloseRegionForSplitResponse) + CloseRegionForSplitResponseOrBuilder { + // Use CloseRegionForSplitResponse.newBuilder() to construct. + private CloseRegionForSplitResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private CloseRegionForSplitResponse() { + closed_ = false; + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private CloseRegionForSplitResponse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + closed_ = input.readBool(); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.Builder.class); + } + + private int bitField0_; + public static final int CLOSED_FIELD_NUMBER = 1; + private boolean closed_; + /** + * required bool closed = 1; + */ + public boolean hasClosed() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool closed = 1; + */ + public boolean getClosed() { + return closed_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (!hasClosed()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, 
closed_); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeBoolSize(1, closed_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse) obj; + + boolean result = true; + result = result && (hasClosed() == other.hasClosed()); + if (hasClosed()) { + result = result && (getClosed() + == other.getClosed()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasClosed()) { + hash = (37 * hash) + CLOSED_FIELD_NUMBER; + hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean( + getClosed()); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.CloseRegionForSplitResponse} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.CloseRegionForSplitResponse) + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponseOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + public Builder clear() { + super.clear(); + closed_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_CloseRegionForSplitResponse_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.closed_ = closed_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + 
return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.getDefaultInstance()) return this; + if (other.hasClosed()) { + setClosed(other.getClosed()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + if (!hasClosed()) { + return false; + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private boolean closed_ ; + /** + * required bool closed = 1; + */ + public boolean hasClosed() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool closed = 1; + */ + public boolean getClosed() { + return closed_; + } + /** + * required bool closed = 1; + */ + public Builder setClosed(boolean value) { + bitField0_ |= 0x00000001; + closed_ = value; + onChanged(); + return this; + } + /** + * required bool closed = 1; + */ + public Builder clearClosed() { + bitField0_ = (bitField0_ & ~0x00000001); + closed_ = false; + onChanged(); + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.CloseRegionForSplitResponse) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.CloseRegionForSplitResponse) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public CloseRegionForSplitResponse parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new CloseRegionForSplitResponse(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + public interface FlushRegionRequestOrBuilder extends // @@protoc_insertion_point(interface_extends:hbase.pb.FlushRegionRequest) org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { @@ -23623,6 +24701,14 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + /** + * rpc CloseRegionForSplit(.hbase.pb.CloseRegionForSplitRequest) returns (.hbase.pb.CloseRegionForSplitResponse); + */ + public abstract void closeRegionForSplit( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + /** * rpc FlushRegion(.hbase.pb.FlushRegionRequest) returns (.hbase.pb.FlushRegionResponse); */ @@ -23764,6 +24850,14 @@ public final class AdminProtos { impl.closeRegion(controller, request, done); } + @java.lang.Override + public void closeRegionForSplit( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + impl.closeRegionForSplit(controller, request, done); + } + @java.lang.Override public void flushRegion( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, @@ -23887,26 +24981,28 @@ public final class AdminProtos { case 5: return impl.closeRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest)request); case 6: - return impl.flushRegion(controller, 
(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest)request); + return impl.closeRegionForSplit(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest)request); case 7: - return impl.splitRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest)request); + return impl.flushRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest)request); case 8: - return impl.compactRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest)request); + return impl.splitRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest)request); case 9: - return impl.mergeRegions(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest)request); + return impl.compactRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest)request); case 10: - return impl.replicateWALEntry(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request); + return impl.mergeRegions(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest)request); case 11: - return impl.replay(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request); + return impl.replicateWALEntry(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request); case 12: - return impl.rollWALWriter(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest)request); + return impl.replay(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request); case 13: - return impl.getServerInfo(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest)request); + return impl.rollWALWriter(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest)request); case 14: - return impl.stopServer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest)request); + return impl.getServerInfo(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest)request); case 15: - return impl.updateFavoredNodes(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request); + return impl.stopServer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest)request); case 16: + return impl.updateFavoredNodes(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request); + case 17: return impl.updateConfiguration(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request); default: throw new java.lang.AssertionError("Can't get here."); @@ -23935,26 +25031,28 @@ public final class AdminProtos { case 5: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest.getDefaultInstance(); case 6: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest.getDefaultInstance(); case 7: - return 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest.getDefaultInstance(); case 8: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest.getDefaultInstance(); case 9: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance(); case 10: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance(); case 11: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); case 13: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance(); case 14: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); case 15: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); case 16: + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); + case 17: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -23983,26 +25081,28 @@ public final class AdminProtos { case 5: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance(); case 6: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.getDefaultInstance(); case 7: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(); case 8: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance(); case 9: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(); case 10: - return 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(); case 11: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); case 13: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(); case 14: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); case 15: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); case 16: + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); + case 17: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -24060,6 +25160,14 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + /** + * rpc CloseRegionForSplit(.hbase.pb.CloseRegionForSplitRequest) returns (.hbase.pb.CloseRegionForSplitResponse); + */ + public abstract void closeRegionForSplit( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + /** * rpc FlushRegion(.hbase.pb.FlushRegionRequest) returns (.hbase.pb.FlushRegionResponse); */ @@ -24201,56 +25309,61 @@ public final class AdminProtos { done)); return; case 6: + this.closeRegionForSplit(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest)request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 7: this.flushRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 7: + case 8: this.splitRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 8: + case 9: this.compactRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 9: + case 10: this.mergeRegions(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest)request, 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 10: + case 11: this.replicateWALEntry(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 11: + case 12: this.replay(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 12: + case 13: this.rollWALWriter(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 13: + case 14: this.getServerInfo(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 14: + case 15: this.stopServer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 15: + case 16: this.updateFavoredNodes(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 16: + case 17: this.updateConfiguration(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); @@ -24282,26 +25395,28 @@ public final class AdminProtos { case 5: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest.getDefaultInstance(); case 6: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest.getDefaultInstance(); case 7: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest.getDefaultInstance(); case 8: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest.getDefaultInstance(); case 9: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance(); case 10: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance(); case 11: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); case 13: - return 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance(); case 14: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); case 15: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); case 16: + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); + case 17: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -24330,26 +25445,28 @@ public final class AdminProtos { case 5: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance(); case 6: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.getDefaultInstance(); case 7: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(); case 8: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance(); case 9: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(); case 10: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(); case 11: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); case 13: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(); case 14: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); case 15: - return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); case 16: + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); + case 17: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -24462,12 +25579,27 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance())); } + public void closeRegionForSplit( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.getDefaultInstance(), + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.class, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.getDefaultInstance())); + } + public void flushRegion( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(6), + getDescriptor().getMethods().get(7), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(), @@ -24482,7 +25614,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(7), + getDescriptor().getMethods().get(8), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance(), @@ -24497,7 +25629,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(8), + getDescriptor().getMethods().get(9), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(), @@ -24512,7 +25644,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(9), + getDescriptor().getMethods().get(10), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(), @@ -24527,7 +25659,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(10), + getDescriptor().getMethods().get(11), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(), @@ -24542,7 
+25674,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(11), + getDescriptor().getMethods().get(12), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(), @@ -24557,7 +25689,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(12), + getDescriptor().getMethods().get(13), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(), @@ -24572,7 +25704,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(13), + getDescriptor().getMethods().get(14), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(), @@ -24587,7 +25719,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(14), + getDescriptor().getMethods().get(15), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(), @@ -24602,7 +25734,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(15), + getDescriptor().getMethods().get(16), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(), @@ -24617,7 +25749,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(16), + getDescriptor().getMethods().get(17), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(), @@ -24664,6 +25796,11 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse closeRegionForSplit( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest request) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse flushRegion( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest request) @@ -24799,12 +25936,24 @@ public final class AdminProtos { } + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse closeRegionForSplit( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest request) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse.getDefaultInstance()); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse flushRegion( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(6), + getDescriptor().getMethods().get(7), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance()); @@ -24816,7 +25965,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(7), + getDescriptor().getMethods().get(8), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance()); @@ -24828,7 +25977,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(8), + getDescriptor().getMethods().get(9), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance()); @@ -24840,7 +25989,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(9), + getDescriptor().getMethods().get(10), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance()); @@ -24852,7 +26001,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse) channel.callBlockingMethod( - 
getDescriptor().getMethods().get(10), + getDescriptor().getMethods().get(11), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance()); @@ -24864,7 +26013,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(11), + getDescriptor().getMethods().get(12), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance()); @@ -24876,7 +26025,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(12), + getDescriptor().getMethods().get(13), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance()); @@ -24888,7 +26037,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(13), + getDescriptor().getMethods().get(14), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance()); @@ -24900,7 +26049,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(14), + getDescriptor().getMethods().get(15), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance()); @@ -24912,7 +26061,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(15), + getDescriptor().getMethods().get(16), controller, request, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance()); @@ -24924,7 +26073,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(16), + getDescriptor().getMethods().get(17), controller, request, 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance()); @@ -25000,6 +26149,16 @@ public final class AdminProtos { private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_CloseRegionResponse_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_CloseRegionForSplitRequest_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_CloseRegionForSplitRequest_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_CloseRegionForSplitResponse_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_CloseRegionForSplitResponse_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_FlushRegionRequest_descriptor; private static final @@ -25159,83 +26318,88 @@ public final class AdminProtos { "n_in_ZK\030\003 \001(\010:\004true\0220\n\022destination_serve" + "r\030\004 \001(\0132\024.hbase.pb.ServerName\022\027\n\017serverS" + "tartCode\030\005 \001(\004\"%\n\023CloseRegionResponse\022\016\n" + + "\006closed\030\001 \002(\010\"G\n\032CloseRegionForSplitRequ" + + "est\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpe" + + "cifier\"-\n\033CloseRegionForSplitResponse\022\016\n" + "\006closed\030\001 \002(\010\"y\n\022FlushRegionRequest\022)\n\006r" + - "egion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\030" + + "egion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\030", "\n\020if_older_than_ts\030\002 \001(\004\022\036\n\026write_flush_" + "wal_marker\030\003 \001(\010\"_\n\023FlushRegionResponse\022" + - "\027\n\017last_flush_time\030\001 \002(\004\022\017\n\007flushed\030\002 \001(", + "\027\n\017last_flush_time\030\001 \002(\004\022\017\n\007flushed\030\002 \001(" + "\010\022\036\n\026wrote_flush_wal_marker\030\003 \001(\010\"T\n\022Spl" + "itRegionRequest\022)\n\006region\030\001 \002(\0132\031.hbase." 
+ "pb.RegionSpecifier\022\023\n\013split_point\030\002 \001(\014\"" + "\025\n\023SplitRegionResponse\"`\n\024CompactRegionR" + "equest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Region" + "Specifier\022\r\n\005major\030\002 \001(\010\022\016\n\006family\030\003 \001(\014" + - "\"\027\n\025CompactRegionResponse\"\315\001\n\031UpdateFavo" + + "\"\027\n\025CompactRegionResponse\"\315\001\n\031UpdateFavo", "redNodesRequest\022I\n\013update_info\030\001 \003(\01324.h" + "base.pb.UpdateFavoredNodesRequest.Region" + - "UpdateInfo\032e\n\020RegionUpdateInfo\022$\n\006region", + "UpdateInfo\032e\n\020RegionUpdateInfo\022$\n\006region" + "\030\001 \002(\0132\024.hbase.pb.RegionInfo\022+\n\rfavored_" + "nodes\030\002 \003(\0132\024.hbase.pb.ServerName\".\n\032Upd" + "ateFavoredNodesResponse\022\020\n\010response\030\001 \001(" + "\r\"\244\001\n\023MergeRegionsRequest\022+\n\010region_a\030\001 " + "\002(\0132\031.hbase.pb.RegionSpecifier\022+\n\010region" + "_b\030\002 \002(\0132\031.hbase.pb.RegionSpecifier\022\027\n\010f" + - "orcible\030\003 \001(\010:\005false\022\032\n\022master_system_ti" + + "orcible\030\003 \001(\010:\005false\022\032\n\022master_system_ti", "me\030\004 \001(\004\"\026\n\024MergeRegionsResponse\"a\n\010WALE" + "ntry\022\035\n\003key\030\001 \002(\0132\020.hbase.pb.WALKey\022\027\n\017k" + - "ey_value_bytes\030\002 \003(\014\022\035\n\025associated_cell_", + "ey_value_bytes\030\002 \003(\014\022\035\n\025associated_cell_" + "count\030\003 \001(\005\"\242\001\n\030ReplicateWALEntryRequest" + "\022!\n\005entry\030\001 \003(\0132\022.hbase.pb.WALEntry\022\034\n\024r" + "eplicationClusterId\030\002 \001(\t\022\"\n\032sourceBaseN" + "amespaceDirPath\030\003 \001(\t\022!\n\031sourceHFileArch" + "iveDirPath\030\004 \001(\t\"\033\n\031ReplicateWALEntryRes" + "ponse\"\026\n\024RollWALWriterRequest\"0\n\025RollWAL" + - "WriterResponse\022\027\n\017region_to_flush\030\001 \003(\014\"" + + "WriterResponse\022\027\n\017region_to_flush\030\001 \003(\014\"", "#\n\021StopServerRequest\022\016\n\006reason\030\001 \002(\t\"\024\n\022" + "StopServerResponse\"\026\n\024GetServerInfoReque" + - "st\"K\n\nServerInfo\022)\n\013server_name\030\001 \002(\0132\024.", + "st\"K\n\nServerInfo\022)\n\013server_name\030\001 \002(\0132\024." + "hbase.pb.ServerName\022\022\n\nwebui_port\030\002 \001(\r\"" + "B\n\025GetServerInfoResponse\022)\n\013server_info\030" + "\001 \002(\0132\024.hbase.pb.ServerInfo\"\034\n\032UpdateCon" + "figurationRequest\"\035\n\033UpdateConfiguration" + - "Response2\207\013\n\014AdminService\022P\n\rGetRegionIn" + + "Response2\353\013\n\014AdminService\022P\n\rGetRegionIn" + "fo\022\036.hbase.pb.GetRegionInfoRequest\032\037.hba" + - "se.pb.GetRegionInfoResponse\022M\n\014GetStoreF" + + "se.pb.GetRegionInfoResponse\022M\n\014GetStoreF", "ile\022\035.hbase.pb.GetStoreFileRequest\032\036.hba" + "se.pb.GetStoreFileResponse\022V\n\017GetOnlineR" + - "egion\022 .hbase.pb.GetOnlineRegionRequest\032", + "egion\022 .hbase.pb.GetOnlineRegionRequest\032" + "!.hbase.pb.GetOnlineRegionResponse\022G\n\nOp" + "enRegion\022\033.hbase.pb.OpenRegionRequest\032\034." + "hbase.pb.OpenRegionResponse\022M\n\014WarmupReg" + "ion\022\035.hbase.pb.WarmupRegionRequest\032\036.hba" + "se.pb.WarmupRegionResponse\022J\n\013CloseRegio" + "n\022\034.hbase.pb.CloseRegionRequest\032\035.hbase." + - "pb.CloseRegionResponse\022J\n\013FlushRegion\022\034." 
+ - "hbase.pb.FlushRegionRequest\032\035.hbase.pb.F" + - "lushRegionResponse\022J\n\013SplitRegion\022\034.hbas" + - "e.pb.SplitRegionRequest\032\035.hbase.pb.Split", - "RegionResponse\022P\n\rCompactRegion\022\036.hbase." + - "pb.CompactRegionRequest\032\037.hbase.pb.Compa" + - "ctRegionResponse\022M\n\014MergeRegions\022\035.hbase" + - ".pb.MergeRegionsRequest\032\036.hbase.pb.Merge" + - "RegionsResponse\022\\\n\021ReplicateWALEntry\022\".h" + - "base.pb.ReplicateWALEntryRequest\032#.hbase" + - ".pb.ReplicateWALEntryResponse\022Q\n\006Replay\022" + - "\".hbase.pb.ReplicateWALEntryRequest\032#.hb" + - "ase.pb.ReplicateWALEntryResponse\022P\n\rRoll" + - "WALWriter\022\036.hbase.pb.RollWALWriterReques", - "t\032\037.hbase.pb.RollWALWriterResponse\022P\n\rGe" + - "tServerInfo\022\036.hbase.pb.GetServerInfoRequ" + - "est\032\037.hbase.pb.GetServerInfoResponse\022G\n\n" + - "StopServer\022\033.hbase.pb.StopServerRequest\032" + - "\034.hbase.pb.StopServerResponse\022_\n\022UpdateF" + - "avoredNodes\022#.hbase.pb.UpdateFavoredNode" + - "sRequest\032$.hbase.pb.UpdateFavoredNodesRe" + - "sponse\022b\n\023UpdateConfiguration\022$.hbase.pb" + - ".UpdateConfigurationRequest\032%.hbase.pb.U" + - "pdateConfigurationResponseBH\n1org.apache", - ".hadoop.hbase.shaded.protobuf.generatedB" + - "\013AdminProtosH\001\210\001\001\240\001\001" + "pb.CloseRegionResponse\022b\n\023CloseRegionFor", + "Split\022$.hbase.pb.CloseRegionForSplitRequ" + + "est\032%.hbase.pb.CloseRegionForSplitRespon" + + "se\022J\n\013FlushRegion\022\034.hbase.pb.FlushRegion" + + "Request\032\035.hbase.pb.FlushRegionResponse\022J" + + "\n\013SplitRegion\022\034.hbase.pb.SplitRegionRequ" + + "est\032\035.hbase.pb.SplitRegionResponse\022P\n\rCo" + + "mpactRegion\022\036.hbase.pb.CompactRegionRequ" + + "est\032\037.hbase.pb.CompactRegionResponse\022M\n\014" + + "MergeRegions\022\035.hbase.pb.MergeRegionsRequ" + + "est\032\036.hbase.pb.MergeRegionsResponse\022\\\n\021R", + "eplicateWALEntry\022\".hbase.pb.ReplicateWAL" + + "EntryRequest\032#.hbase.pb.ReplicateWALEntr" + + "yResponse\022Q\n\006Replay\022\".hbase.pb.Replicate" + + "WALEntryRequest\032#.hbase.pb.ReplicateWALE" + + "ntryResponse\022P\n\rRollWALWriter\022\036.hbase.pb" + + ".RollWALWriterRequest\032\037.hbase.pb.RollWAL" + + "WriterResponse\022P\n\rGetServerInfo\022\036.hbase." + + "pb.GetServerInfoRequest\032\037.hbase.pb.GetSe" + + "rverInfoResponse\022G\n\nStopServer\022\033.hbase.p" + + "b.StopServerRequest\032\034.hbase.pb.StopServe", + "rResponse\022_\n\022UpdateFavoredNodes\022#.hbase." + + "pb.UpdateFavoredNodesRequest\032$.hbase.pb." + + "UpdateFavoredNodesResponse\022b\n\023UpdateConf" + + "iguration\022$.hbase.pb.UpdateConfiguration" + + "Request\032%.hbase.pb.UpdateConfigurationRe" + + "sponseBH\n1org.apache.hadoop.hbase.shaded" + + ".protobuf.generatedB\013AdminProtosH\001\210\001\001\240\001\001" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { @@ -25329,44 +26493,56 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CloseRegionResponse_descriptor, new java.lang.String[] { "Closed", }); - internal_static_hbase_pb_FlushRegionRequest_descriptor = + internal_static_hbase_pb_CloseRegionForSplitRequest_descriptor = getDescriptor().getMessageTypes().get(12); + internal_static_hbase_pb_CloseRegionForSplitRequest_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_CloseRegionForSplitRequest_descriptor, + new java.lang.String[] { "Region", }); + internal_static_hbase_pb_CloseRegionForSplitResponse_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_hbase_pb_CloseRegionForSplitResponse_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_CloseRegionForSplitResponse_descriptor, + new java.lang.String[] { "Closed", }); + internal_static_hbase_pb_FlushRegionRequest_descriptor = + getDescriptor().getMessageTypes().get(14); internal_static_hbase_pb_FlushRegionRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_FlushRegionRequest_descriptor, new java.lang.String[] { "Region", "IfOlderThanTs", "WriteFlushWalMarker", }); internal_static_hbase_pb_FlushRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(13); + getDescriptor().getMessageTypes().get(15); internal_static_hbase_pb_FlushRegionResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_FlushRegionResponse_descriptor, new java.lang.String[] { "LastFlushTime", "Flushed", "WroteFlushWalMarker", }); internal_static_hbase_pb_SplitRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(16); internal_static_hbase_pb_SplitRegionRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SplitRegionRequest_descriptor, new java.lang.String[] { "Region", "SplitPoint", }); internal_static_hbase_pb_SplitRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(17); internal_static_hbase_pb_SplitRegionResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_SplitRegionResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_CompactRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(18); internal_static_hbase_pb_CompactRegionRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CompactRegionRequest_descriptor, new java.lang.String[] { "Region", "Major", "Family", }); internal_static_hbase_pb_CompactRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(19); internal_static_hbase_pb_CompactRegionResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( 
internal_static_hbase_pb_CompactRegionResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_UpdateFavoredNodesRequest_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(20); internal_static_hbase_pb_UpdateFavoredNodesRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_UpdateFavoredNodesRequest_descriptor, @@ -25378,91 +26554,91 @@ public final class AdminProtos { internal_static_hbase_pb_UpdateFavoredNodesRequest_RegionUpdateInfo_descriptor, new java.lang.String[] { "Region", "FavoredNodes", }); internal_static_hbase_pb_UpdateFavoredNodesResponse_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(21); internal_static_hbase_pb_UpdateFavoredNodesResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_UpdateFavoredNodesResponse_descriptor, new java.lang.String[] { "Response", }); internal_static_hbase_pb_MergeRegionsRequest_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(22); internal_static_hbase_pb_MergeRegionsRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_MergeRegionsRequest_descriptor, new java.lang.String[] { "RegionA", "RegionB", "Forcible", "MasterSystemTime", }); internal_static_hbase_pb_MergeRegionsResponse_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(23); internal_static_hbase_pb_MergeRegionsResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_MergeRegionsResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_WALEntry_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(24); internal_static_hbase_pb_WALEntry_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_WALEntry_descriptor, new java.lang.String[] { "Key", "KeyValueBytes", "AssociatedCellCount", }); internal_static_hbase_pb_ReplicateWALEntryRequest_descriptor = - getDescriptor().getMessageTypes().get(23); + getDescriptor().getMessageTypes().get(25); internal_static_hbase_pb_ReplicateWALEntryRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ReplicateWALEntryRequest_descriptor, new java.lang.String[] { "Entry", "ReplicationClusterId", "SourceBaseNamespaceDirPath", "SourceHFileArchiveDirPath", }); internal_static_hbase_pb_ReplicateWALEntryResponse_descriptor = - getDescriptor().getMessageTypes().get(24); + getDescriptor().getMessageTypes().get(26); internal_static_hbase_pb_ReplicateWALEntryResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ReplicateWALEntryResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_RollWALWriterRequest_descriptor = - getDescriptor().getMessageTypes().get(25); + getDescriptor().getMessageTypes().get(27); internal_static_hbase_pb_RollWALWriterRequest_fieldAccessorTable = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_RollWALWriterRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_RollWALWriterResponse_descriptor = - getDescriptor().getMessageTypes().get(26); + getDescriptor().getMessageTypes().get(28); internal_static_hbase_pb_RollWALWriterResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_RollWALWriterResponse_descriptor, new java.lang.String[] { "RegionToFlush", }); internal_static_hbase_pb_StopServerRequest_descriptor = - getDescriptor().getMessageTypes().get(27); + getDescriptor().getMessageTypes().get(29); internal_static_hbase_pb_StopServerRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_StopServerRequest_descriptor, new java.lang.String[] { "Reason", }); internal_static_hbase_pb_StopServerResponse_descriptor = - getDescriptor().getMessageTypes().get(28); + getDescriptor().getMessageTypes().get(30); internal_static_hbase_pb_StopServerResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_StopServerResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetServerInfoRequest_descriptor = - getDescriptor().getMessageTypes().get(29); + getDescriptor().getMessageTypes().get(31); internal_static_hbase_pb_GetServerInfoRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetServerInfoRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_ServerInfo_descriptor = - getDescriptor().getMessageTypes().get(30); + getDescriptor().getMessageTypes().get(32); internal_static_hbase_pb_ServerInfo_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ServerInfo_descriptor, new java.lang.String[] { "ServerName", "WebuiPort", }); internal_static_hbase_pb_GetServerInfoResponse_descriptor = - getDescriptor().getMessageTypes().get(31); + getDescriptor().getMessageTypes().get(33); internal_static_hbase_pb_GetServerInfoResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetServerInfoResponse_descriptor, new java.lang.String[] { "ServerInfo", }); internal_static_hbase_pb_UpdateConfigurationRequest_descriptor = - getDescriptor().getMessageTypes().get(32); + getDescriptor().getMessageTypes().get(34); internal_static_hbase_pb_UpdateConfigurationRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_UpdateConfigurationRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_UpdateConfigurationResponse_descriptor = - getDescriptor().getMessageTypes().get(33); + getDescriptor().getMessageTypes().get(35); internal_static_hbase_pb_UpdateConfigurationResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_UpdateConfigurationResponse_descriptor, diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java index 9942cd28a6e..6ddfe0759c4 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java @@ -1832,6 +1832,168 @@ public final class MasterProcedureProtos { // @@protoc_insertion_point(enum_scope:hbase.pb.DispatchMergingRegionsState) } + /** + * Protobuf enum {@code hbase.pb.SplitTableRegionState} + */ + public enum SplitTableRegionState + implements org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum { + /** + * SPLIT_TABLE_REGION_PREPARE = 1; + */ + SPLIT_TABLE_REGION_PREPARE(1), + /** + * SPLIT_TABLE_REGION_PRE_OPERATION = 2; + */ + SPLIT_TABLE_REGION_PRE_OPERATION(2), + /** + * SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE = 3; + */ + SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE(3), + /** + * SPLIT_TABLE_REGION_CLOSED_PARENT_REGION = 4; + */ + SPLIT_TABLE_REGION_CLOSED_PARENT_REGION(4), + /** + * SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS = 5; + */ + SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS(5), + /** + * SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR = 6; + */ + SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR(6), + /** + * SPLIT_TABLE_REGION_UPDATE_META = 7; + */ + SPLIT_TABLE_REGION_UPDATE_META(7), + /** + * SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_PONR = 8; + */ + SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_PONR(8), + /** + * SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS = 9; + */ + SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS(9), + /** + * SPLIT_TABLE_REGION_POST_OPERATION = 10; + */ + SPLIT_TABLE_REGION_POST_OPERATION(10), + ; + + /** + * SPLIT_TABLE_REGION_PREPARE = 1; + */ + public static final int SPLIT_TABLE_REGION_PREPARE_VALUE = 1; + /** + * SPLIT_TABLE_REGION_PRE_OPERATION = 2; + */ + public static final int SPLIT_TABLE_REGION_PRE_OPERATION_VALUE = 2; + /** + * SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE = 3; + */ + public static final int SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE_VALUE = 3; + /** + * SPLIT_TABLE_REGION_CLOSED_PARENT_REGION = 4; + */ + public static final int SPLIT_TABLE_REGION_CLOSED_PARENT_REGION_VALUE = 4; + /** + * SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS = 5; + */ + public static final int SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS_VALUE = 5; + /** + * SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR = 6; + */ + public static final int SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR_VALUE = 6; + /** + * SPLIT_TABLE_REGION_UPDATE_META = 7; + */ + public static final int SPLIT_TABLE_REGION_UPDATE_META_VALUE = 7; + /** + * SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_PONR = 8; + */ + public static final int SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_PONR_VALUE = 8; + /** + * SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS = 9; + */ + public static final int SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS_VALUE = 9; + /** + * SPLIT_TABLE_REGION_POST_OPERATION = 10; + */ + public static final int SPLIT_TABLE_REGION_POST_OPERATION_VALUE = 10; + + + public final int getNumber() { + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static SplitTableRegionState valueOf(int value) { + return forNumber(value); + } + + public static SplitTableRegionState forNumber(int value) { + switch (value) { + case 1: return SPLIT_TABLE_REGION_PREPARE; + case 2: return SPLIT_TABLE_REGION_PRE_OPERATION; + case 3: return SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE; + case 4: return SPLIT_TABLE_REGION_CLOSED_PARENT_REGION; + case 5: return SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS; + case 6: return SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR; + case 7: return SPLIT_TABLE_REGION_UPDATE_META; + case 8: return SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_PONR; + case 9: return SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS; + case 10: return SPLIT_TABLE_REGION_POST_OPERATION; + default: return null; + } + } + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap< + SplitTableRegionState> internalValueMap = + new org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap() { + public SplitTableRegionState findValueByNumber(int number) { + return SplitTableRegionState.forNumber(number); + } + }; + + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(15); + } + + private static final SplitTableRegionState[] VALUES = values(); + + public static SplitTableRegionState valueOf( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private SplitTableRegionState(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.SplitTableRegionState) + } + /** * Protobuf enum {@code hbase.pb.ServerCrashState} */ @@ -1970,7 +2132,7 @@ public final class MasterProcedureProtos { } public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(15); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(16); } private static final ServerCrashState[] VALUES = values(); @@ -20947,6 +21109,1492 @@ public final class MasterProcedureProtos { } + public interface SplitTableRegionStateDataOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.SplitTableRegionStateData) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + boolean hasUserInfo(); + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation getUserInfo(); + /** + * required 
.hbase.pb.UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder(); + + /** + * required .hbase.pb.TableName table_name = 2; + */ + boolean hasTableName(); + /** + * required .hbase.pb.TableName table_name = 2; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * required .hbase.pb.TableName table_name = 2; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + + /** + * required .hbase.pb.RegionInfo parent_region_info = 3; + */ + boolean hasParentRegionInfo(); + /** + * required .hbase.pb.RegionInfo parent_region_info = 3; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getParentRegionInfo(); + /** + * required .hbase.pb.RegionInfo parent_region_info = 3; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getParentRegionInfoOrBuilder(); + + /** + * optional bytes split_row = 4; + */ + boolean hasSplitRow(); + /** + * optional bytes split_row = 4; + */ + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitRow(); + + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + java.util.List + getChildRegionInfoList(); + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getChildRegionInfo(int index); + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + int getChildRegionInfoCount(); + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + java.util.List + getChildRegionInfoOrBuilderList(); + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getChildRegionInfoOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.SplitTableRegionStateData} + */ + public static final class SplitTableRegionStateData extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.SplitTableRegionStateData) + SplitTableRegionStateDataOrBuilder { + // Use SplitTableRegionStateData.newBuilder() to construct. 
+ private SplitTableRegionStateData(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private SplitTableRegionStateData() { + splitRow_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY; + childRegionInfo_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SplitTableRegionStateData( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = userInfo_.toBuilder(); + } + userInfo_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(userInfo_); + userInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 26: { + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = parentRegionInfo_.toBuilder(); + } + parentRegionInfo_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(parentRegionInfo_); + parentRegionInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + case 34: { + bitField0_ |= 0x00000008; + splitRow_ = input.readBytes(); + break; + } + case 42: { + if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + childRegionInfo_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000010; + } + childRegionInfo_.add( + input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry)); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000010) == 
0x00000010)) { + childRegionInfo_ = java.util.Collections.unmodifiableList(childRegionInfo_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_SplitTableRegionStateData_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_SplitTableRegionStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData.Builder.class); + } + + private int bitField0_; + public static final int USER_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation userInfo_; + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + return userInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance() : userInfo_; + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + return userInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance() : userInfo_; + } + + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * required .hbase.pb.TableName table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_; + } + /** + * required .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_; + } + + public static final int PARENT_REGION_INFO_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo parentRegionInfo_; + /** + * required .hbase.pb.RegionInfo parent_region_info = 3; + */ + public boolean hasParentRegionInfo() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .hbase.pb.RegionInfo parent_region_info = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getParentRegionInfo() { + return parentRegionInfo_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : parentRegionInfo_; + } + /** + * required .hbase.pb.RegionInfo parent_region_info = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getParentRegionInfoOrBuilder() { + return parentRegionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : parentRegionInfo_; + } + + public static final int SPLIT_ROW_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString splitRow_; + /** + * optional bytes split_row = 4; + */ + public boolean hasSplitRow() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bytes split_row = 4; + */ + public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitRow() { + return splitRow_; + } + + public static final int CHILD_REGION_INFO_FIELD_NUMBER = 5; + private java.util.List childRegionInfo_; + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public java.util.List getChildRegionInfoList() { + return childRegionInfo_; + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public java.util.List + getChildRegionInfoOrBuilderList() { + return childRegionInfo_; + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public int getChildRegionInfoCount() { + return childRegionInfo_.size(); + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getChildRegionInfo(int index) { + return childRegionInfo_.get(index); + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getChildRegionInfoOrBuilder( + int index) { + return childRegionInfo_.get(index); + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (!hasUserInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasParentRegionInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!getUserInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getParentRegionInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getChildRegionInfoCount(); i++) { + if (!getChildRegionInfo(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, getUserInfo()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, getTableName()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, getParentRegionInfo()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBytes(4, splitRow_); + } + for (int i = 0; i < childRegionInfo_.size(); i++) { + output.writeMessage(5, childRegionInfo_.get(i)); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { 
+ int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getUserInfo()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(2, getTableName()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(3, getParentRegionInfo()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeBytesSize(4, splitRow_); + } + for (int i = 0; i < childRegionInfo_.size(); i++) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(5, childRegionInfo_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData) obj; + + boolean result = true; + result = result && (hasUserInfo() == other.hasUserInfo()); + if (hasUserInfo()) { + result = result && getUserInfo() + .equals(other.getUserInfo()); + } + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasParentRegionInfo() == other.hasParentRegionInfo()); + if (hasParentRegionInfo()) { + result = result && getParentRegionInfo() + .equals(other.getParentRegionInfo()); + } + result = result && (hasSplitRow() == other.hasSplitRow()); + if (hasSplitRow()) { + result = result && getSplitRow() + .equals(other.getSplitRow()); + } + result = result && getChildRegionInfoList() + .equals(other.getChildRegionInfoList()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasUserInfo()) { + hash = (37 * hash) + USER_INFO_FIELD_NUMBER; + hash = (53 * hash) + getUserInfo().hashCode(); + } + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasParentRegionInfo()) { + hash = (37 * hash) + PARENT_REGION_INFO_FIELD_NUMBER; + hash = (53 * hash) + getParentRegionInfo().hashCode(); + } + if (hasSplitRow()) { + hash = (37 * hash) + SPLIT_ROW_FIELD_NUMBER; + hash = (53 * hash) + getSplitRow().hashCode(); + } + if (getChildRegionInfoCount() > 0) { + hash = (37 * hash) + CHILD_REGION_INFO_FIELD_NUMBER; + hash = (53 * hash) + getChildRegionInfoList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parseFrom( + 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + 
.parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.SplitTableRegionStateData} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.SplitTableRegionStateData) + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateDataOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_SplitTableRegionStateData_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_SplitTableRegionStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getUserInfoFieldBuilder(); + getTableNameFieldBuilder(); + getParentRegionInfoFieldBuilder(); + getChildRegionInfoFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + if (userInfoBuilder_ == null) { + userInfo_ = null; + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (tableNameBuilder_ == null) { + tableName_ = null; + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (parentRegionInfoBuilder_ == null) { + parentRegionInfo_ = null; + } else { + parentRegionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + splitRow_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000008); + if (childRegionInfoBuilder_ == null) { + childRegionInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + } else { + childRegionInfoBuilder_.clear(); + } + return this; + } + + public 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_SplitTableRegionStateData_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (userInfoBuilder_ == null) { + result.userInfo_ = userInfo_; + } else { + result.userInfo_ = userInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (parentRegionInfoBuilder_ == null) { + result.parentRegionInfo_ = parentRegionInfo_; + } else { + result.parentRegionInfo_ = parentRegionInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.splitRow_ = splitRow_; + if (childRegionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010)) { + childRegionInfo_ = java.util.Collections.unmodifiableList(childRegionInfo_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.childRegionInfo_ = childRegionInfo_; + } else { + result.childRegionInfo_ = childRegionInfoBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder 
mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData.getDefaultInstance()) return this; + if (other.hasUserInfo()) { + mergeUserInfo(other.getUserInfo()); + } + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + if (other.hasParentRegionInfo()) { + mergeParentRegionInfo(other.getParentRegionInfo()); + } + if (other.hasSplitRow()) { + setSplitRow(other.getSplitRow()); + } + if (childRegionInfoBuilder_ == null) { + if (!other.childRegionInfo_.isEmpty()) { + if (childRegionInfo_.isEmpty()) { + childRegionInfo_ = other.childRegionInfo_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureChildRegionInfoIsMutable(); + childRegionInfo_.addAll(other.childRegionInfo_); + } + onChanged(); + } + } else { + if (!other.childRegionInfo_.isEmpty()) { + if (childRegionInfoBuilder_.isEmpty()) { + childRegionInfoBuilder_.dispose(); + childRegionInfoBuilder_ = null; + childRegionInfo_ = other.childRegionInfo_; + bitField0_ = (bitField0_ & ~0x00000010); + childRegionInfoBuilder_ = + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getChildRegionInfoFieldBuilder() : null; + } else { + childRegionInfoBuilder_.addAllMessages(other.childRegionInfo_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + if (!hasUserInfo()) { + return false; + } + if (!hasTableName()) { + return false; + } + if (!hasParentRegionInfo()) { + return false; + } + if (!getUserInfo().isInitialized()) { + return false; + } + if (!getTableName().isInitialized()) { + return false; + } + if (!getParentRegionInfo().isInitialized()) { + return false; + } + for (int i = 0; i < getChildRegionInfoCount(); i++) { + if (!getChildRegionInfo(i).isInitialized()) { + return false; + } + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation userInfo_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation, 
org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_; + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + if (userInfoBuilder_ == null) { + return userInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance() : userInfo_; + } else { + return userInfoBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public Builder setUserInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + userInfo_ = value; + onChanged(); + } else { + userInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public Builder setUserInfo( + org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) { + if (userInfoBuilder_ == null) { + userInfo_ = builderForValue.build(); + onChanged(); + } else { + userInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public Builder mergeUserInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + userInfo_ != null && + userInfo_ != org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) { + userInfo_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial(); + } else { + userInfo_ = value; + } + onChanged(); + } else { + userInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public Builder clearUserInfo() { + if (userInfoBuilder_ == null) { + userInfo_ = null; + onChanged(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getUserInfoFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + if (userInfoBuilder_ != null) { + return userInfoBuilder_.getMessageOrBuilder(); + } else { + return userInfo_ == null ? 
+ org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance() : userInfo_; + } + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder> + getUserInfoFieldBuilder() { + if (userInfoBuilder_ == null) { + userInfoBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder>( + getUserInfo(), + getParentForChildren(), + isClean()); + userInfo_ = null; + } + return userInfoBuilder_; + } + + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName tableName_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * required .hbase.pb.TableName table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.TableName table_name = 2; + */ + public Builder setTableName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.TableName table_name = 2; + */ + public Builder setTableName( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.TableName table_name = 2; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + tableName_ != null && + tableName_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.TableName table_name = 2; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = null; + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_ == null ? 
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_; + } + } + /** + * required .hbase.pb.TableName table_name = 2; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + getTableName(), + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo parentRegionInfo_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> parentRegionInfoBuilder_; + /** + * required .hbase.pb.RegionInfo parent_region_info = 3; + */ + public boolean hasParentRegionInfo() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .hbase.pb.RegionInfo parent_region_info = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getParentRegionInfo() { + if (parentRegionInfoBuilder_ == null) { + return parentRegionInfo_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : parentRegionInfo_; + } else { + return parentRegionInfoBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.RegionInfo parent_region_info = 3; + */ + public Builder setParentRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) { + if (parentRegionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + parentRegionInfo_ = value; + onChanged(); + } else { + parentRegionInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .hbase.pb.RegionInfo parent_region_info = 3; + */ + public Builder setParentRegionInfo( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (parentRegionInfoBuilder_ == null) { + parentRegionInfo_ = builderForValue.build(); + onChanged(); + } else { + parentRegionInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .hbase.pb.RegionInfo parent_region_info = 3; + */ + public Builder mergeParentRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) { + if (parentRegionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + parentRegionInfo_ != null && + parentRegionInfo_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) { + parentRegionInfo_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(parentRegionInfo_).mergeFrom(value).buildPartial(); + } else { + parentRegionInfo_ = value; + } + onChanged(); + } else { + parentRegionInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .hbase.pb.RegionInfo parent_region_info = 3; + */ + public Builder clearParentRegionInfo() { + if (parentRegionInfoBuilder_ == null) { + parentRegionInfo_ = null; + onChanged(); + } else { + parentRegionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * required .hbase.pb.RegionInfo parent_region_info = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder getParentRegionInfoBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getParentRegionInfoFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.RegionInfo parent_region_info = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getParentRegionInfoOrBuilder() { + if (parentRegionInfoBuilder_ != null) { + return parentRegionInfoBuilder_.getMessageOrBuilder(); + } else { + return parentRegionInfo_ == null ? 
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : parentRegionInfo_; + } + } + /** + * required .hbase.pb.RegionInfo parent_region_info = 3; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getParentRegionInfoFieldBuilder() { + if (parentRegionInfoBuilder_ == null) { + parentRegionInfoBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + getParentRegionInfo(), + getParentForChildren(), + isClean()); + parentRegionInfo_ = null; + } + return parentRegionInfoBuilder_; + } + + private org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString splitRow_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes split_row = 4; + */ + public boolean hasSplitRow() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bytes split_row = 4; + */ + public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitRow() { + return splitRow_; + } + /** + * optional bytes split_row = 4; + */ + public Builder setSplitRow(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + splitRow_ = value; + onChanged(); + return this; + } + /** + * optional bytes split_row = 4; + */ + public Builder clearSplitRow() { + bitField0_ = (bitField0_ & ~0x00000008); + splitRow_ = getDefaultInstance().getSplitRow(); + onChanged(); + return this; + } + + private java.util.List childRegionInfo_ = + java.util.Collections.emptyList(); + private void ensureChildRegionInfoIsMutable() { + if (!((bitField0_ & 0x00000010) == 0x00000010)) { + childRegionInfo_ = new java.util.ArrayList(childRegionInfo_); + bitField0_ |= 0x00000010; + } + } + + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> childRegionInfoBuilder_; + + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public java.util.List getChildRegionInfoList() { + if (childRegionInfoBuilder_ == null) { + return java.util.Collections.unmodifiableList(childRegionInfo_); + } else { + return childRegionInfoBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public int getChildRegionInfoCount() { + if (childRegionInfoBuilder_ == null) { + return childRegionInfo_.size(); + } else { + return childRegionInfoBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getChildRegionInfo(int index) { + if (childRegionInfoBuilder_ == null) { + return childRegionInfo_.get(index); + } else { + return childRegionInfoBuilder_.getMessage(index); + } + } + /** + * 
repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public Builder setChildRegionInfo( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) { + if (childRegionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureChildRegionInfoIsMutable(); + childRegionInfo_.set(index, value); + onChanged(); + } else { + childRegionInfoBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public Builder setChildRegionInfo( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (childRegionInfoBuilder_ == null) { + ensureChildRegionInfoIsMutable(); + childRegionInfo_.set(index, builderForValue.build()); + onChanged(); + } else { + childRegionInfoBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public Builder addChildRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) { + if (childRegionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureChildRegionInfoIsMutable(); + childRegionInfo_.add(value); + onChanged(); + } else { + childRegionInfoBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public Builder addChildRegionInfo( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) { + if (childRegionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureChildRegionInfoIsMutable(); + childRegionInfo_.add(index, value); + onChanged(); + } else { + childRegionInfoBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public Builder addChildRegionInfo( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (childRegionInfoBuilder_ == null) { + ensureChildRegionInfoIsMutable(); + childRegionInfo_.add(builderForValue.build()); + onChanged(); + } else { + childRegionInfoBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public Builder addChildRegionInfo( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (childRegionInfoBuilder_ == null) { + ensureChildRegionInfoIsMutable(); + childRegionInfo_.add(index, builderForValue.build()); + onChanged(); + } else { + childRegionInfoBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public Builder addAllChildRegionInfo( + java.lang.Iterable values) { + if (childRegionInfoBuilder_ == null) { + ensureChildRegionInfoIsMutable(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, childRegionInfo_); + onChanged(); + } else { + childRegionInfoBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public Builder clearChildRegionInfo() { + if (childRegionInfoBuilder_ == null) { + childRegionInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + childRegionInfoBuilder_.clear(); + } + return this; + } + 
/** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public Builder removeChildRegionInfo(int index) { + if (childRegionInfoBuilder_ == null) { + ensureChildRegionInfoIsMutable(); + childRegionInfo_.remove(index); + onChanged(); + } else { + childRegionInfoBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder getChildRegionInfoBuilder( + int index) { + return getChildRegionInfoFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getChildRegionInfoOrBuilder( + int index) { + if (childRegionInfoBuilder_ == null) { + return childRegionInfo_.get(index); } else { + return childRegionInfoBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public java.util.List + getChildRegionInfoOrBuilderList() { + if (childRegionInfoBuilder_ != null) { + return childRegionInfoBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(childRegionInfo_); + } + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder addChildRegionInfoBuilder() { + return getChildRegionInfoFieldBuilder().addBuilder( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder addChildRegionInfoBuilder( + int index) { + return getChildRegionInfoFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionInfo child_region_info = 5; + */ + public java.util.List + getChildRegionInfoBuilderList() { + return getChildRegionInfoFieldBuilder().getBuilderList(); + } + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getChildRegionInfoFieldBuilder() { + if (childRegionInfoBuilder_ == null) { + childRegionInfoBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + childRegionInfo_, + ((bitField0_ & 0x00000010) == 0x00000010), + getParentForChildren(), + isClean()); + childRegionInfo_ = null; + } + return childRegionInfoBuilder_; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.SplitTableRegionStateData) + } + + 
// @@protoc_insertion_point(class_scope:hbase.pb.SplitTableRegionStateData) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public SplitTableRegionStateData parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new SplitTableRegionStateData(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStateData getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + public interface ServerCrashStateDataOrBuilder extends // @@protoc_insertion_point(interface_extends:hbase.pb.ServerCrashStateData) org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { @@ -22639,6 +24287,11 @@ public final class MasterProcedureProtos { private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_DispatchMergingRegionsStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_SplitTableRegionStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_SplitTableRegionStateData_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_ServerCrashStateData_descriptor; private static final @@ -22732,108 +24385,125 @@ public final class MasterProcedureProtos { "e.pb.UserInformation\022\'\n\ntable_name\030\002 \002(\013" + "2\023.hbase.pb.TableName\022)\n\013region_info\030\003 \003" + "(\0132\024.hbase.pb.RegionInfo\022\020\n\010forcible\030\004 \001" + - "(\010\"\201\002\n\024ServerCrashStateData\022)\n\013server_na", - "me\030\001 \002(\0132\024.hbase.pb.ServerName\022\036\n\026distri" + - "buted_log_replay\030\002 \001(\010\0227\n\031regions_on_cra" + - "shed_server\030\003 \003(\0132\024.hbase.pb.RegionInfo\022" + - ".\n\020regions_assigned\030\004 \003(\0132\024.hbase.pb.Reg" + - "ionInfo\022\025\n\rcarrying_meta\030\005 \001(\010\022\036\n\020should" + - "_split_wal\030\006 \001(\010:\004true*\330\001\n\020CreateTableSt" + - "ate\022\036\n\032CREATE_TABLE_PRE_OPERATION\020\001\022 \n\034C" + - "REATE_TABLE_WRITE_FS_LAYOUT\020\002\022\034\n\030CREATE_" + - "TABLE_ADD_TO_META\020\003\022\037\n\033CREATE_TABLE_ASSI" + - "GN_REGIONS\020\004\022\"\n\036CREATE_TABLE_UPDATE_DESC", - 
"_CACHE\020\005\022\037\n\033CREATE_TABLE_POST_OPERATION\020" + - "\006*\207\002\n\020ModifyTableState\022\030\n\024MODIFY_TABLE_P" + - "REPARE\020\001\022\036\n\032MODIFY_TABLE_PRE_OPERATION\020\002" + - "\022(\n$MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR" + - "\020\003\022&\n\"MODIFY_TABLE_REMOVE_REPLICA_COLUMN" + - "\020\004\022!\n\035MODIFY_TABLE_DELETE_FS_LAYOUT\020\005\022\037\n" + - "\033MODIFY_TABLE_POST_OPERATION\020\006\022#\n\037MODIFY" + - "_TABLE_REOPEN_ALL_REGIONS\020\007*\212\002\n\022Truncate" + - "TableState\022 \n\034TRUNCATE_TABLE_PRE_OPERATI" + - "ON\020\001\022#\n\037TRUNCATE_TABLE_REMOVE_FROM_META\020", - "\002\022\"\n\036TRUNCATE_TABLE_CLEAR_FS_LAYOUT\020\003\022#\n" + - "\037TRUNCATE_TABLE_CREATE_FS_LAYOUT\020\004\022\036\n\032TR" + - "UNCATE_TABLE_ADD_TO_META\020\005\022!\n\035TRUNCATE_T" + - "ABLE_ASSIGN_REGIONS\020\006\022!\n\035TRUNCATE_TABLE_" + - "POST_OPERATION\020\007*\337\001\n\020DeleteTableState\022\036\n" + - "\032DELETE_TABLE_PRE_OPERATION\020\001\022!\n\035DELETE_" + - "TABLE_REMOVE_FROM_META\020\002\022 \n\034DELETE_TABLE" + - "_CLEAR_FS_LAYOUT\020\003\022\"\n\036DELETE_TABLE_UPDAT" + - "E_DESC_CACHE\020\004\022!\n\035DELETE_TABLE_UNASSIGN_" + - "REGIONS\020\005\022\037\n\033DELETE_TABLE_POST_OPERATION", - "\020\006*\320\001\n\024CreateNamespaceState\022\034\n\030CREATE_NA" + - "MESPACE_PREPARE\020\001\022%\n!CREATE_NAMESPACE_CR" + - "EATE_DIRECTORY\020\002\022)\n%CREATE_NAMESPACE_INS" + - "ERT_INTO_NS_TABLE\020\003\022\036\n\032CREATE_NAMESPACE_" + - "UPDATE_ZK\020\004\022(\n$CREATE_NAMESPACE_SET_NAME" + - "SPACE_QUOTA\020\005*z\n\024ModifyNamespaceState\022\034\n" + - "\030MODIFY_NAMESPACE_PREPARE\020\001\022$\n MODIFY_NA" + - "MESPACE_UPDATE_NS_TABLE\020\002\022\036\n\032MODIFY_NAME" + - "SPACE_UPDATE_ZK\020\003*\332\001\n\024DeleteNamespaceSta" + - "te\022\034\n\030DELETE_NAMESPACE_PREPARE\020\001\022)\n%DELE", - "TE_NAMESPACE_DELETE_FROM_NS_TABLE\020\002\022#\n\037D" + - "ELETE_NAMESPACE_REMOVE_FROM_ZK\020\003\022\'\n#DELE" + - "TE_NAMESPACE_DELETE_DIRECTORIES\020\004\022+\n\'DEL" + - "ETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA\020\005*\331" + - "\001\n\024AddColumnFamilyState\022\035\n\031ADD_COLUMN_FA" + - "MILY_PREPARE\020\001\022#\n\037ADD_COLUMN_FAMILY_PRE_" + - "OPERATION\020\002\022-\n)ADD_COLUMN_FAMILY_UPDATE_" + - "TABLE_DESCRIPTOR\020\003\022$\n ADD_COLUMN_FAMILY_" + - "POST_OPERATION\020\004\022(\n$ADD_COLUMN_FAMILY_RE" + - "OPEN_ALL_REGIONS\020\005*\353\001\n\027ModifyColumnFamil", - "yState\022 \n\034MODIFY_COLUMN_FAMILY_PREPARE\020\001" + - "\022&\n\"MODIFY_COLUMN_FAMILY_PRE_OPERATION\020\002" + - "\0220\n,MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DE" + - "SCRIPTOR\020\003\022\'\n#MODIFY_COLUMN_FAMILY_POST_" + - "OPERATION\020\004\022+\n\'MODIFY_COLUMN_FAMILY_REOP" + - "EN_ALL_REGIONS\020\005*\226\002\n\027DeleteColumnFamilyS" + - "tate\022 \n\034DELETE_COLUMN_FAMILY_PREPARE\020\001\022&" + - "\n\"DELETE_COLUMN_FAMILY_PRE_OPERATION\020\002\0220" + - "\n,DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESC" + - "RIPTOR\020\003\022)\n%DELETE_COLUMN_FAMILY_DELETE_", - "FS_LAYOUT\020\004\022\'\n#DELETE_COLUMN_FAMILY_POST" + - "_OPERATION\020\005\022+\n\'DELETE_COLUMN_FAMILY_REO" + - "PEN_ALL_REGIONS\020\006*\350\001\n\020EnableTableState\022\030" + - "\n\024ENABLE_TABLE_PREPARE\020\001\022\036\n\032ENABLE_TABLE" + - "_PRE_OPERATION\020\002\022)\n%ENABLE_TABLE_SET_ENA" + - "BLING_TABLE_STATE\020\003\022$\n ENABLE_TABLE_MARK" + - "_REGIONS_ONLINE\020\004\022(\n$ENABLE_TABLE_SET_EN" + - "ABLED_TABLE_STATE\020\005\022\037\n\033ENABLE_TABLE_POST" + - 
"_OPERATION\020\006*\362\001\n\021DisableTableState\022\031\n\025DI" + - "SABLE_TABLE_PREPARE\020\001\022\037\n\033DISABLE_TABLE_P", - "RE_OPERATION\020\002\022+\n\'DISABLE_TABLE_SET_DISA" + - "BLING_TABLE_STATE\020\003\022&\n\"DISABLE_TABLE_MAR" + - "K_REGIONS_OFFLINE\020\004\022*\n&DISABLE_TABLE_SET" + - "_DISABLED_TABLE_STATE\020\005\022 \n\034DISABLE_TABLE" + - "_POST_OPERATION\020\006*\346\001\n\022CloneSnapshotState" + - "\022 \n\034CLONE_SNAPSHOT_PRE_OPERATION\020\001\022\"\n\036CL" + - "ONE_SNAPSHOT_WRITE_FS_LAYOUT\020\002\022\036\n\032CLONE_" + - "SNAPSHOT_ADD_TO_META\020\003\022!\n\035CLONE_SNAPSHOT" + - "_ASSIGN_REGIONS\020\004\022$\n CLONE_SNAPSHOT_UPDA" + - "TE_DESC_CACHE\020\005\022!\n\035CLONE_SNAPSHOT_POST_O", - "PERATION\020\006*\260\001\n\024RestoreSnapshotState\022\"\n\036R" + - "ESTORE_SNAPSHOT_PRE_OPERATION\020\001\022,\n(RESTO" + - "RE_SNAPSHOT_UPDATE_TABLE_DESCRIPTOR\020\002\022$\n" + - " RESTORE_SNAPSHOT_WRITE_FS_LAYOUT\020\003\022 \n\034R" + - "ESTORE_SNAPSHOT_UPDATE_META\020\004*\376\001\n\033Dispat" + - "chMergingRegionsState\022$\n DISPATCH_MERGIN" + - "G_REGIONS_PREPARE\020\001\022*\n&DISPATCH_MERGING_" + - "REGIONS_PRE_OPERATION\020\002\0223\n/DISPATCH_MERG" + - "ING_REGIONS_MOVE_REGION_TO_SAME_RS\020\003\022+\n\'" + - "DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS\020", - "\004\022+\n\'DISPATCH_MERGING_REGIONS_POST_OPERA" + - "TION\020\005*\234\002\n\020ServerCrashState\022\026\n\022SERVER_CR" + - "ASH_START\020\001\022\035\n\031SERVER_CRASH_PROCESS_META" + - "\020\002\022\034\n\030SERVER_CRASH_GET_REGIONS\020\003\022\036\n\032SERV" + - "ER_CRASH_NO_SPLIT_LOGS\020\004\022\033\n\027SERVER_CRASH" + - "_SPLIT_LOGS\020\005\022#\n\037SERVER_CRASH_PREPARE_LO" + - "G_REPLAY\020\006\022\027\n\023SERVER_CRASH_ASSIGN\020\010\022\037\n\033S" + - "ERVER_CRASH_WAIT_ON_ASSIGN\020\t\022\027\n\023SERVER_C" + - "RASH_FINISH\020dBR\n1org.apache.hadoop.hbase" + - ".shaded.protobuf.generatedB\025MasterProced", - "ureProtosH\001\210\001\001\240\001\001" + "(\010\"\350\001\n\031SplitTableRegionStateData\022,\n\tuser", + "_info\030\001 \002(\0132\031.hbase.pb.UserInformation\022\'" + + "\n\ntable_name\030\002 \002(\0132\023.hbase.pb.TableName\022" + + "0\n\022parent_region_info\030\003 \002(\0132\024.hbase.pb.R" + + "egionInfo\022\021\n\tsplit_row\030\004 \001(\014\022/\n\021child_re" + + "gion_info\030\005 \003(\0132\024.hbase.pb.RegionInfo\"\201\002" + + "\n\024ServerCrashStateData\022)\n\013server_name\030\001 " + + "\002(\0132\024.hbase.pb.ServerName\022\036\n\026distributed" + + "_log_replay\030\002 \001(\010\0227\n\031regions_on_crashed_" + + "server\030\003 \003(\0132\024.hbase.pb.RegionInfo\022.\n\020re" + + "gions_assigned\030\004 \003(\0132\024.hbase.pb.RegionIn", + "fo\022\025\n\rcarrying_meta\030\005 \001(\010\022\036\n\020should_spli" + + "t_wal\030\006 \001(\010:\004true*\330\001\n\020CreateTableState\022\036" + + "\n\032CREATE_TABLE_PRE_OPERATION\020\001\022 \n\034CREATE" + + "_TABLE_WRITE_FS_LAYOUT\020\002\022\034\n\030CREATE_TABLE" + + "_ADD_TO_META\020\003\022\037\n\033CREATE_TABLE_ASSIGN_RE" + + "GIONS\020\004\022\"\n\036CREATE_TABLE_UPDATE_DESC_CACH" + + "E\020\005\022\037\n\033CREATE_TABLE_POST_OPERATION\020\006*\207\002\n" + + "\020ModifyTableState\022\030\n\024MODIFY_TABLE_PREPAR" + + "E\020\001\022\036\n\032MODIFY_TABLE_PRE_OPERATION\020\002\022(\n$M" + + "ODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR\020\003\022&\n", + "\"MODIFY_TABLE_REMOVE_REPLICA_COLUMN\020\004\022!\n" + + "\035MODIFY_TABLE_DELETE_FS_LAYOUT\020\005\022\037\n\033MODI" + + 
"FY_TABLE_POST_OPERATION\020\006\022#\n\037MODIFY_TABL" + + "E_REOPEN_ALL_REGIONS\020\007*\212\002\n\022TruncateTable" + + "State\022 \n\034TRUNCATE_TABLE_PRE_OPERATION\020\001\022" + + "#\n\037TRUNCATE_TABLE_REMOVE_FROM_META\020\002\022\"\n\036" + + "TRUNCATE_TABLE_CLEAR_FS_LAYOUT\020\003\022#\n\037TRUN" + + "CATE_TABLE_CREATE_FS_LAYOUT\020\004\022\036\n\032TRUNCAT" + + "E_TABLE_ADD_TO_META\020\005\022!\n\035TRUNCATE_TABLE_" + + "ASSIGN_REGIONS\020\006\022!\n\035TRUNCATE_TABLE_POST_", + "OPERATION\020\007*\337\001\n\020DeleteTableState\022\036\n\032DELE" + + "TE_TABLE_PRE_OPERATION\020\001\022!\n\035DELETE_TABLE" + + "_REMOVE_FROM_META\020\002\022 \n\034DELETE_TABLE_CLEA" + + "R_FS_LAYOUT\020\003\022\"\n\036DELETE_TABLE_UPDATE_DES" + + "C_CACHE\020\004\022!\n\035DELETE_TABLE_UNASSIGN_REGIO" + + "NS\020\005\022\037\n\033DELETE_TABLE_POST_OPERATION\020\006*\320\001" + + "\n\024CreateNamespaceState\022\034\n\030CREATE_NAMESPA" + + "CE_PREPARE\020\001\022%\n!CREATE_NAMESPACE_CREATE_" + + "DIRECTORY\020\002\022)\n%CREATE_NAMESPACE_INSERT_I" + + "NTO_NS_TABLE\020\003\022\036\n\032CREATE_NAMESPACE_UPDAT", + "E_ZK\020\004\022(\n$CREATE_NAMESPACE_SET_NAMESPACE" + + "_QUOTA\020\005*z\n\024ModifyNamespaceState\022\034\n\030MODI" + + "FY_NAMESPACE_PREPARE\020\001\022$\n MODIFY_NAMESPA" + + "CE_UPDATE_NS_TABLE\020\002\022\036\n\032MODIFY_NAMESPACE" + + "_UPDATE_ZK\020\003*\332\001\n\024DeleteNamespaceState\022\034\n" + + "\030DELETE_NAMESPACE_PREPARE\020\001\022)\n%DELETE_NA" + + "MESPACE_DELETE_FROM_NS_TABLE\020\002\022#\n\037DELETE" + + "_NAMESPACE_REMOVE_FROM_ZK\020\003\022\'\n#DELETE_NA" + + "MESPACE_DELETE_DIRECTORIES\020\004\022+\n\'DELETE_N" + + "AMESPACE_REMOVE_NAMESPACE_QUOTA\020\005*\331\001\n\024Ad", + "dColumnFamilyState\022\035\n\031ADD_COLUMN_FAMILY_" + + "PREPARE\020\001\022#\n\037ADD_COLUMN_FAMILY_PRE_OPERA" + + "TION\020\002\022-\n)ADD_COLUMN_FAMILY_UPDATE_TABLE" + + "_DESCRIPTOR\020\003\022$\n ADD_COLUMN_FAMILY_POST_" + + "OPERATION\020\004\022(\n$ADD_COLUMN_FAMILY_REOPEN_" + + "ALL_REGIONS\020\005*\353\001\n\027ModifyColumnFamilyStat" + + "e\022 \n\034MODIFY_COLUMN_FAMILY_PREPARE\020\001\022&\n\"M" + + "ODIFY_COLUMN_FAMILY_PRE_OPERATION\020\002\0220\n,M" + + "ODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIP" + + "TOR\020\003\022\'\n#MODIFY_COLUMN_FAMILY_POST_OPERA", + "TION\020\004\022+\n\'MODIFY_COLUMN_FAMILY_REOPEN_AL" + + "L_REGIONS\020\005*\226\002\n\027DeleteColumnFamilyState\022" + + " \n\034DELETE_COLUMN_FAMILY_PREPARE\020\001\022&\n\"DEL" + + "ETE_COLUMN_FAMILY_PRE_OPERATION\020\002\0220\n,DEL" + + "ETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTO" + + "R\020\003\022)\n%DELETE_COLUMN_FAMILY_DELETE_FS_LA" + + "YOUT\020\004\022\'\n#DELETE_COLUMN_FAMILY_POST_OPER" + + "ATION\020\005\022+\n\'DELETE_COLUMN_FAMILY_REOPEN_A" + + "LL_REGIONS\020\006*\350\001\n\020EnableTableState\022\030\n\024ENA" + + "BLE_TABLE_PREPARE\020\001\022\036\n\032ENABLE_TABLE_PRE_", + "OPERATION\020\002\022)\n%ENABLE_TABLE_SET_ENABLING" + + "_TABLE_STATE\020\003\022$\n ENABLE_TABLE_MARK_REGI" + + "ONS_ONLINE\020\004\022(\n$ENABLE_TABLE_SET_ENABLED" + + "_TABLE_STATE\020\005\022\037\n\033ENABLE_TABLE_POST_OPER" + + "ATION\020\006*\362\001\n\021DisableTableState\022\031\n\025DISABLE" + + "_TABLE_PREPARE\020\001\022\037\n\033DISABLE_TABLE_PRE_OP" + + "ERATION\020\002\022+\n\'DISABLE_TABLE_SET_DISABLING" + + "_TABLE_STATE\020\003\022&\n\"DISABLE_TABLE_MARK_REG" + + "IONS_OFFLINE\020\004\022*\n&DISABLE_TABLE_SET_DISA" + + "BLED_TABLE_STATE\020\005\022 \n\034DISABLE_TABLE_POST", + 
"_OPERATION\020\006*\346\001\n\022CloneSnapshotState\022 \n\034C" + + "LONE_SNAPSHOT_PRE_OPERATION\020\001\022\"\n\036CLONE_S" + + "NAPSHOT_WRITE_FS_LAYOUT\020\002\022\036\n\032CLONE_SNAPS" + + "HOT_ADD_TO_META\020\003\022!\n\035CLONE_SNAPSHOT_ASSI" + + "GN_REGIONS\020\004\022$\n CLONE_SNAPSHOT_UPDATE_DE" + + "SC_CACHE\020\005\022!\n\035CLONE_SNAPSHOT_POST_OPERAT" + + "ION\020\006*\260\001\n\024RestoreSnapshotState\022\"\n\036RESTOR" + + "E_SNAPSHOT_PRE_OPERATION\020\001\022,\n(RESTORE_SN" + + "APSHOT_UPDATE_TABLE_DESCRIPTOR\020\002\022$\n REST" + + "ORE_SNAPSHOT_WRITE_FS_LAYOUT\020\003\022 \n\034RESTOR", + "E_SNAPSHOT_UPDATE_META\020\004*\376\001\n\033DispatchMer" + + "gingRegionsState\022$\n DISPATCH_MERGING_REG" + + "IONS_PREPARE\020\001\022*\n&DISPATCH_MERGING_REGIO" + + "NS_PRE_OPERATION\020\002\0223\n/DISPATCH_MERGING_R" + + "EGIONS_MOVE_REGION_TO_SAME_RS\020\003\022+\n\'DISPA" + + "TCH_MERGING_REGIONS_DO_MERGE_IN_RS\020\004\022+\n\'" + + "DISPATCH_MERGING_REGIONS_POST_OPERATION\020" + + "\005*\305\003\n\025SplitTableRegionState\022\036\n\032SPLIT_TAB" + + "LE_REGION_PREPARE\020\001\022$\n SPLIT_TABLE_REGIO" + + "N_PRE_OPERATION\020\002\0220\n,SPLIT_TABLE_REGION_", + "SET_SPLITTING_TABLE_STATE\020\003\022+\n\'SPLIT_TAB" + + "LE_REGION_CLOSED_PARENT_REGION\020\004\022.\n*SPLI" + + "T_TABLE_REGION_CREATE_DAUGHTER_REGIONS\020\005" + + "\0220\n,SPLIT_TABLE_REGION_PRE_OPERATION_BEF" + + "ORE_PONR\020\006\022\"\n\036SPLIT_TABLE_REGION_UPDATE_" + + "META\020\007\022/\n+SPLIT_TABLE_REGION_PRE_OPERATI" + + "ON_AFTER_PONR\020\010\022)\n%SPLIT_TABLE_REGION_OP" + + "EN_CHILD_REGIONS\020\t\022%\n!SPLIT_TABLE_REGION" + + "_POST_OPERATION\020\n*\234\002\n\020ServerCrashState\022\026" + + "\n\022SERVER_CRASH_START\020\001\022\035\n\031SERVER_CRASH_P", + "ROCESS_META\020\002\022\034\n\030SERVER_CRASH_GET_REGION" + + "S\020\003\022\036\n\032SERVER_CRASH_NO_SPLIT_LOGS\020\004\022\033\n\027S" + + "ERVER_CRASH_SPLIT_LOGS\020\005\022#\n\037SERVER_CRASH" + + "_PREPARE_LOG_REPLAY\020\006\022\027\n\023SERVER_CRASH_AS" + + "SIGN\020\010\022\037\n\033SERVER_CRASH_WAIT_ON_ASSIGN\020\t\022" + + "\027\n\023SERVER_CRASH_FINISH\020dBR\n1org.apache.h" + + "adoop.hbase.shaded.protobuf.generatedB\025M" + + "asterProcedureProtosH\001\210\001\001\240\001\001" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { @@ -22945,8 +24615,14 @@ public final class MasterProcedureProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_DispatchMergingRegionsStateData_descriptor, new java.lang.String[] { "UserInfo", "TableName", "RegionInfo", "Forcible", }); - internal_static_hbase_pb_ServerCrashStateData_descriptor = + internal_static_hbase_pb_SplitTableRegionStateData_descriptor = getDescriptor().getMessageTypes().get(16); + internal_static_hbase_pb_SplitTableRegionStateData_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_SplitTableRegionStateData_descriptor, + new java.lang.String[] { "UserInfo", "TableName", "ParentRegionInfo", "SplitRow", "ChildRegionInfo", }); + internal_static_hbase_pb_ServerCrashStateData_descriptor = + getDescriptor().getMessageTypes().get(17); internal_static_hbase_pb_ServerCrashStateData_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ServerCrashStateData_descriptor, diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java index b8712b7c71f..8f368e9d421 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java @@ -8822,6 +8822,1348 @@ public final class RegionServerStatusProtos { } + public interface SplitTableRegionRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.SplitTableRegionRequest) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + boolean hasRegionInfo(); + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(); + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(); + + /** + * required bytes split_row = 2; + */ + boolean hasSplitRow(); + /** + * required bytes split_row = 2; + */ + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitRow(); + + /** + * optional uint64 nonce_group = 3 [default = 0]; + */ + boolean hasNonceGroup(); + /** + * optional uint64 nonce_group = 3 [default = 0]; + */ + long getNonceGroup(); + + /** + * optional uint64 nonce = 4 [default = 0]; + */ + boolean hasNonce(); + /** + * optional uint64 nonce = 4 [default = 0]; + */ + long getNonce(); + } + /** + *
+   **
+   * Splits the specified region.
+   * 
+ * + * Protobuf type {@code hbase.pb.SplitTableRegionRequest} + */ + public static final class SplitTableRegionRequest extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.SplitTableRegionRequest) + SplitTableRegionRequestOrBuilder { + // Use SplitTableRegionRequest.newBuilder() to construct. + private SplitTableRegionRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private SplitTableRegionRequest() { + splitRow_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY; + nonceGroup_ = 0L; + nonce_ = 0L; + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SplitTableRegionRequest( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = regionInfo_.toBuilder(); + } + regionInfo_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(regionInfo_); + regionInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + bitField0_ |= 0x00000002; + splitRow_ = input.readBytes(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + nonceGroup_ = input.readUInt64(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + nonce_ = input.readUInt64(); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_SplitTableRegionRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_SplitTableRegionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest.class, 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest.Builder.class); + } + + private int bitField0_; + public static final int REGION_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo regionInfo_; + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public boolean hasRegionInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() { + return regionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_; + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() { + return regionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_; + } + + public static final int SPLIT_ROW_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString splitRow_; + /** + * required bytes split_row = 2; + */ + public boolean hasSplitRow() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bytes split_row = 2; + */ + public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitRow() { + return splitRow_; + } + + public static final int NONCE_GROUP_FIELD_NUMBER = 3; + private long nonceGroup_; + /** + * optional uint64 nonce_group = 3 [default = 0]; + */ + public boolean hasNonceGroup() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 nonce_group = 3 [default = 0]; + */ + public long getNonceGroup() { + return nonceGroup_; + } + + public static final int NONCE_FIELD_NUMBER = 4; + private long nonce_; + /** + * optional uint64 nonce = 4 [default = 0]; + */ + public boolean hasNonce() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional uint64 nonce = 4 [default = 0]; + */ + public long getNonce() { + return nonce_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (!hasRegionInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSplitRow()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegionInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, getRegionInfo()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, splitRow_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, nonceGroup_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt64(4, nonce_); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getRegionInfo()); + } + if (((bitField0_ & 0x00000002) == 
0x00000002)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeBytesSize(2, splitRow_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, nonceGroup_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeUInt64Size(4, nonce_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest) obj; + + boolean result = true; + result = result && (hasRegionInfo() == other.hasRegionInfo()); + if (hasRegionInfo()) { + result = result && getRegionInfo() + .equals(other.getRegionInfo()); + } + result = result && (hasSplitRow() == other.hasSplitRow()); + if (hasSplitRow()) { + result = result && getSplitRow() + .equals(other.getSplitRow()); + } + result = result && (hasNonceGroup() == other.hasNonceGroup()); + if (hasNonceGroup()) { + result = result && (getNonceGroup() + == other.getNonceGroup()); + } + result = result && (hasNonce() == other.hasNonce()); + if (hasNonce()) { + result = result && (getNonce() + == other.getNonce()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegionInfo()) { + hash = (37 * hash) + REGION_INFO_FIELD_NUMBER; + hash = (53 * hash) + getRegionInfo().hashCode(); + } + if (hasSplitRow()) { + hash = (37 * hash) + SPLIT_ROW_FIELD_NUMBER; + hash = (53 * hash) + getSplitRow().hashCode(); + } + if (hasNonceGroup()) { + hash = (37 * hash) + NONCE_GROUP_FIELD_NUMBER; + hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong( + getNonceGroup()); + } + if (hasNonce()) { + hash = (37 * hash) + NONCE_FIELD_NUMBER; + hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong( + getNonce()); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+     **
+     * Splits the specified region.
+     * 
+ * + * Protobuf type {@code hbase.pb.SplitTableRegionRequest} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.SplitTableRegionRequest) + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequestOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_SplitTableRegionRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_SplitTableRegionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getRegionInfoFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + if (regionInfoBuilder_ == null) { + regionInfo_ = null; + } else { + regionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + splitRow_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + nonceGroup_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + nonce_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_SplitTableRegionRequest_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if 
(((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (regionInfoBuilder_ == null) { + result.regionInfo_ = regionInfo_; + } else { + result.regionInfo_ = regionInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.splitRow_ = splitRow_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.nonceGroup_ = nonceGroup_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.nonce_ = nonce_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest.getDefaultInstance()) return this; + if (other.hasRegionInfo()) { + mergeRegionInfo(other.getRegionInfo()); + } + if (other.hasSplitRow()) { + setSplitRow(other.getSplitRow()); + } + if (other.hasNonceGroup()) { + setNonceGroup(other.getNonceGroup()); + } + if (other.hasNonce()) { + setNonce(other.getNonce()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + if (!hasRegionInfo()) { + return false; + } + if (!hasSplitRow()) { + return false; + } + if (!getRegionInfo().isInitialized()) { + return false; + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = 
(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo regionInfo_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_; + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public boolean hasRegionInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() { + if (regionInfoBuilder_ == null) { + return regionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_; + } else { + return regionInfoBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public Builder setRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + regionInfo_ = value; + onChanged(); + } else { + regionInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public Builder setRegionInfo( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + regionInfo_ = builderForValue.build(); + onChanged(); + } else { + regionInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public Builder mergeRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + regionInfo_ != null && + regionInfo_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) { + regionInfo_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(regionInfo_).mergeFrom(value).buildPartial(); + } else { + regionInfo_ = value; + } + onChanged(); + } else { + regionInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public Builder clearRegionInfo() { + if (regionInfoBuilder_ == null) { + regionInfo_ = null; + onChanged(); + } else { + regionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegionInfoFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() { + if 
(regionInfoBuilder_ != null) { + return regionInfoBuilder_.getMessageOrBuilder(); + } else { + return regionInfo_ == null ? + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_; + } + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionInfoFieldBuilder() { + if (regionInfoBuilder_ == null) { + regionInfoBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + getRegionInfo(), + getParentForChildren(), + isClean()); + regionInfo_ = null; + } + return regionInfoBuilder_; + } + + private org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString splitRow_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY; + /** + * required bytes split_row = 2; + */ + public boolean hasSplitRow() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bytes split_row = 2; + */ + public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitRow() { + return splitRow_; + } + /** + * required bytes split_row = 2; + */ + public Builder setSplitRow(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + splitRow_ = value; + onChanged(); + return this; + } + /** + * required bytes split_row = 2; + */ + public Builder clearSplitRow() { + bitField0_ = (bitField0_ & ~0x00000002); + splitRow_ = getDefaultInstance().getSplitRow(); + onChanged(); + return this; + } + + private long nonceGroup_ ; + /** + * optional uint64 nonce_group = 3 [default = 0]; + */ + public boolean hasNonceGroup() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 nonce_group = 3 [default = 0]; + */ + public long getNonceGroup() { + return nonceGroup_; + } + /** + * optional uint64 nonce_group = 3 [default = 0]; + */ + public Builder setNonceGroup(long value) { + bitField0_ |= 0x00000004; + nonceGroup_ = value; + onChanged(); + return this; + } + /** + * optional uint64 nonce_group = 3 [default = 0]; + */ + public Builder clearNonceGroup() { + bitField0_ = (bitField0_ & ~0x00000004); + nonceGroup_ = 0L; + onChanged(); + return this; + } + + private long nonce_ ; + /** + * optional uint64 nonce = 4 [default = 0]; + */ + public boolean hasNonce() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional uint64 nonce = 4 [default = 0]; + */ + public long getNonce() { + return nonce_; + } + /** + * optional uint64 nonce = 4 [default = 0]; + */ + public Builder setNonce(long value) { + bitField0_ |= 0x00000008; + nonce_ = value; + onChanged(); + return this; + } + /** + * optional uint64 nonce = 4 [default = 0]; + */ + public Builder clearNonce() { + bitField0_ = (bitField0_ & ~0x00000008); + nonce_ = 0L; + onChanged(); + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + 
return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.SplitTableRegionRequest) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.SplitTableRegionRequest) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public SplitTableRegionRequest parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new SplitTableRegionRequest(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface SplitTableRegionResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.SplitTableRegionResponse) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * optional uint64 proc_id = 1; + */ + boolean hasProcId(); + /** + * optional uint64 proc_id = 1; + */ + long getProcId(); + } + /** + * Protobuf type {@code hbase.pb.SplitTableRegionResponse} + */ + public static final class SplitTableRegionResponse extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.SplitTableRegionResponse) + SplitTableRegionResponseOrBuilder { + // Use SplitTableRegionResponse.newBuilder() to construct. 
+ private SplitTableRegionResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private SplitTableRegionResponse() { + procId_ = 0L; + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SplitTableRegionResponse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + procId_ = input.readUInt64(); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_SplitTableRegionResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_SplitTableRegionResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse.Builder.class); + } + + private int bitField0_; + public static final int PROC_ID_FIELD_NUMBER = 1; + private long procId_; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, procId_); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + 
.computeUInt64Size(1, procId_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse) obj; + + boolean result = true; + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong( + getProcId()); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.SplitTableRegionResponse} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.SplitTableRegionResponse) + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponseOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_SplitTableRegionResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_SplitTableRegionResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + public Builder clear() { + super.clear(); + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_SplitTableRegionResponse_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } 
+ result.procId_ = procId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse.getDefaultInstance()) return this; + if (other.hasProcId()) { + setProcId(other.getProcId()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private long procId_ ; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000001; + procId_ = value; + onChanged(); + return this; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000001); + procId_ = 0L; + onChanged(); + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet 
unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.SplitTableRegionResponse) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.SplitTableRegionResponse) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public SplitTableRegionResponse parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new SplitTableRegionResponse(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + /** * Protobuf service {@code hbase.pb.RegionServerStatusService} */ @@ -8897,6 +10239,32 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + /** + *
+       * <pre>
+       **
+       * Split region
+       * </pre>
+ * + * rpc SplitRegion(.hbase.pb.SplitTableRegionRequest) returns (.hbase.pb.SplitTableRegionResponse); + */ + public abstract void splitRegion( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + + /** + *
+       * <pre>
+       **
+       * Get procedure result
+       * </pre>
+ * + * rpc getProcedureResult(.hbase.pb.GetProcedureResultRequest) returns (.hbase.pb.GetProcedureResultResponse); + */ + public abstract void getProcedureResult( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Service newReflectiveService( @@ -8942,6 +10310,22 @@ public final class RegionServerStatusProtos { impl.reportRegionStateTransition(controller, request, done); } + @java.lang.Override + public void splitRegion( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + impl.splitRegion(controller, request, done); + } + + @java.lang.Override + public void getProcedureResult( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + impl.getProcedureResult(controller, request, done); + } + }; } @@ -8974,6 +10358,10 @@ public final class RegionServerStatusProtos { return impl.getLastFlushedSequenceId(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest)request); case 4: return impl.reportRegionStateTransition(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest)request); + case 5: + return impl.splitRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest)request); + case 6: + return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -8998,6 +10386,10 @@ public final class RegionServerStatusProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.getDefaultInstance(); case 4: return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -9022,6 +10414,10 @@ public final class RegionServerStatusProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance(); case 4: return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get 
here."); } @@ -9097,6 +10493,32 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + /** + *
+     * <pre>
+     **
+     * Split region
+     * </pre>
+ * + * rpc SplitRegion(.hbase.pb.SplitTableRegionRequest) returns (.hbase.pb.SplitTableRegionResponse); + */ + public abstract void splitRegion( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + + /** + *
+     * <pre>
+     **
+     * Get procedure result
+     * </pre>
+ * + * rpc getProcedureResult(.hbase.pb.GetProcedureResultRequest) returns (.hbase.pb.GetProcedureResultResponse); + */ + public abstract void getProcedureResult( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -9144,6 +10566,16 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 5: + this.splitRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest)request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 6: + this.getProcedureResult(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest)request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -9168,6 +10600,10 @@ public final class RegionServerStatusProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.getDefaultInstance(); case 4: return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -9192,6 +10628,10 @@ public final class RegionServerStatusProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance(); case 4: return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -9287,6 +10727,36 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.getDefaultInstance())); } + + public void splitRegion( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(5), + controller, + request, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse.getDefaultInstance(), + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback( 
+ done, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse.class, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse.getDefaultInstance())); + } + + public void getProcedureResult( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(), + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.class, + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -9319,6 +10789,16 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse splitRegion( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest request) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse getProcedureResult( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest request) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -9387,6 +10867,30 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse splitRegion( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest request) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(5), + controller, + request, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse getProcedureResult( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest request) + throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:hbase.pb.RegionServerStatusService) @@ -9447,6 +10951,16 @@ public final class RegionServerStatusProtos { private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_ReportRegionStateTransitionResponse_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_SplitTableRegionRequest_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_SplitTableRegionRequest_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_SplitTableRegionResponse_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_SplitTableRegionResponse_fieldAccessorTable; public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -9457,54 +10971,63 @@ public final class RegionServerStatusProtos { static { java.lang.String[] descriptorData = { "\n\030RegionServerStatus.proto\022\010hbase.pb\032\013HB" + - "ase.proto\032\023ClusterStatus.proto\"\205\001\n\032Regio" + - "nServerStartupRequest\022\014\n\004port\030\001 \002(\r\022\031\n\021s" + - "erver_start_code\030\002 \002(\004\022\033\n\023server_current" + - "_time\030\003 \002(\004\022!\n\031use_this_hostname_instead" + - "\030\004 \001(\t\"L\n\033RegionServerStartupResponse\022-\n" + - "\013map_entries\030\001 \003(\0132\030.hbase.pb.NameString" + - "Pair\"e\n\031RegionServerReportRequest\022$\n\006ser" + - "ver\030\001 \002(\0132\024.hbase.pb.ServerName\022\"\n\004load\030" + - "\002 \001(\0132\024.hbase.pb.ServerLoad\"\034\n\032RegionSer", - "verReportResponse\"X\n\031ReportRSFatalErrorR" + - "equest\022$\n\006server\030\001 \002(\0132\024.hbase.pb.Server" + - "Name\022\025\n\rerror_message\030\002 \002(\t\"\034\n\032ReportRSF" + - "atalErrorResponse\"6\n\037GetLastFlushedSeque" + - "nceIdRequest\022\023\n\013region_name\030\001 \002(\014\"\207\001\n Ge" + - "tLastFlushedSequenceIdResponse\022 \n\030last_f" + - "lushed_sequence_id\030\001 \002(\004\022A\n\036store_last_f" + - "lushed_sequence_id\030\002 \003(\0132\031.hbase.pb.Stor" + - "eSequenceId\"\344\002\n\025RegionStateTransition\022G\n" + - "\017transition_code\030\001 \002(\0162..hbase.pb.Region", - "StateTransition.TransitionCode\022)\n\013region" + - "_info\030\002 \003(\0132\024.hbase.pb.RegionInfo\022\024\n\014ope" + - "n_seq_num\030\003 \001(\004\"\300\001\n\016TransitionCode\022\n\n\006OP" + - "ENED\020\000\022\017\n\013FAILED_OPEN\020\001\022\n\n\006CLOSED\020\002\022\022\n\016R" + - "EADY_TO_SPLIT\020\003\022\022\n\016READY_TO_MERGE\020\004\022\016\n\nS" + - "PLIT_PONR\020\005\022\016\n\nMERGE_PONR\020\006\022\t\n\005SPLIT\020\007\022\n" + - "\n\006MERGED\020\010\022\022\n\016SPLIT_REVERTED\020\t\022\022\n\016MERGE_" + - "REVERTED\020\n\"\177\n\"ReportRegionStateTransitio" + - 
"nRequest\022$\n\006server\030\001 \002(\0132\024.hbase.pb.Serv" + - "erName\0223\n\ntransition\030\002 \003(\0132\037.hbase.pb.Re", - "gionStateTransition\"<\n#ReportRegionState" + - "TransitionResponse\022\025\n\rerror_message\030\001 \001(" + - "\t2\260\004\n\031RegionServerStatusService\022b\n\023Regio" + - "nServerStartup\022$.hbase.pb.RegionServerSt" + - "artupRequest\032%.hbase.pb.RegionServerStar" + - "tupResponse\022_\n\022RegionServerReport\022#.hbas" + - "e.pb.RegionServerReportRequest\032$.hbase.p" + - "b.RegionServerReportResponse\022_\n\022ReportRS" + - "FatalError\022#.hbase.pb.ReportRSFatalError" + - "Request\032$.hbase.pb.ReportRSFatalErrorRes", - "ponse\022q\n\030GetLastFlushedSequenceId\022).hbas" + - "e.pb.GetLastFlushedSequenceIdRequest\032*.h" + - "base.pb.GetLastFlushedSequenceIdResponse" + - "\022z\n\033ReportRegionStateTransition\022,.hbase." + - "pb.ReportRegionStateTransitionRequest\032-." + - "hbase.pb.ReportRegionStateTransitionResp" + - "onseBU\n1org.apache.hadoop.hbase.shaded.p" + - "rotobuf.generatedB\030RegionServerStatusPro" + - "tosH\001\210\001\001\240\001\001" + "ase.proto\032\014Master.proto\032\023ClusterStatus.p" + + "roto\"\205\001\n\032RegionServerStartupRequest\022\014\n\004p" + + "ort\030\001 \002(\r\022\031\n\021server_start_code\030\002 \002(\004\022\033\n\023" + + "server_current_time\030\003 \002(\004\022!\n\031use_this_ho" + + "stname_instead\030\004 \001(\t\"L\n\033RegionServerStar" + + "tupResponse\022-\n\013map_entries\030\001 \003(\0132\030.hbase" + + ".pb.NameStringPair\"e\n\031RegionServerReport" + + "Request\022$\n\006server\030\001 \002(\0132\024.hbase.pb.Serve" + + "rName\022\"\n\004load\030\002 \001(\0132\024.hbase.pb.ServerLoa", + "d\"\034\n\032RegionServerReportResponse\"X\n\031Repor" + + "tRSFatalErrorRequest\022$\n\006server\030\001 \002(\0132\024.h" + + "base.pb.ServerName\022\025\n\rerror_message\030\002 \002(" + + "\t\"\034\n\032ReportRSFatalErrorResponse\"6\n\037GetLa" + + "stFlushedSequenceIdRequest\022\023\n\013region_nam" + + "e\030\001 \002(\014\"\207\001\n GetLastFlushedSequenceIdResp" + + "onse\022 \n\030last_flushed_sequence_id\030\001 \002(\004\022A" + + "\n\036store_last_flushed_sequence_id\030\002 \003(\0132\031" + + ".hbase.pb.StoreSequenceId\"\344\002\n\025RegionStat" + + "eTransition\022G\n\017transition_code\030\001 \002(\0162..h", + "base.pb.RegionStateTransition.Transition" + + "Code\022)\n\013region_info\030\002 \003(\0132\024.hbase.pb.Reg" + + "ionInfo\022\024\n\014open_seq_num\030\003 \001(\004\"\300\001\n\016Transi" + + "tionCode\022\n\n\006OPENED\020\000\022\017\n\013FAILED_OPEN\020\001\022\n\n" + + "\006CLOSED\020\002\022\022\n\016READY_TO_SPLIT\020\003\022\022\n\016READY_T" + + "O_MERGE\020\004\022\016\n\nSPLIT_PONR\020\005\022\016\n\nMERGE_PONR\020" + + "\006\022\t\n\005SPLIT\020\007\022\n\n\006MERGED\020\010\022\022\n\016SPLIT_REVERT" + + "ED\020\t\022\022\n\016MERGE_REVERTED\020\n\"\177\n\"ReportRegion" + + "StateTransitionRequest\022$\n\006server\030\001 \002(\0132\024" + + ".hbase.pb.ServerName\0223\n\ntransition\030\002 \003(\013", + "2\037.hbase.pb.RegionStateTransition\"<\n#Rep" + + "ortRegionStateTransitionResponse\022\025\n\rerro" + + "r_message\030\001 \001(\t\"\201\001\n\027SplitTableRegionRequ" + + "est\022)\n\013region_info\030\001 \002(\0132\024.hbase.pb.Regi" + + "onInfo\022\021\n\tsplit_row\030\002 \002(\014\022\026\n\013nonce_group" + + "\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"+\n\030SplitTabl" + + "eRegionResponse\022\017\n\007proc_id\030\001 
\001(\0042\347\005\n\031Reg" + + "ionServerStatusService\022b\n\023RegionServerSt" + + "artup\022$.hbase.pb.RegionServerStartupRequ" + + "est\032%.hbase.pb.RegionServerStartupRespon", + "se\022_\n\022RegionServerReport\022#.hbase.pb.Regi" + + "onServerReportRequest\032$.hbase.pb.RegionS" + + "erverReportResponse\022_\n\022ReportRSFatalErro" + + "r\022#.hbase.pb.ReportRSFatalErrorRequest\032$" + + ".hbase.pb.ReportRSFatalErrorResponse\022q\n\030" + + "GetLastFlushedSequenceId\022).hbase.pb.GetL" + + "astFlushedSequenceIdRequest\032*.hbase.pb.G" + + "etLastFlushedSequenceIdResponse\022z\n\033Repor" + + "tRegionStateTransition\022,.hbase.pb.Report" + + "RegionStateTransitionRequest\032-.hbase.pb.", + "ReportRegionStateTransitionResponse\022T\n\013S" + + "plitRegion\022!.hbase.pb.SplitTableRegionRe" + + "quest\032\".hbase.pb.SplitTableRegionRespons" + + "e\022_\n\022getProcedureResult\022#.hbase.pb.GetPr" + + "ocedureResultRequest\032$.hbase.pb.GetProce" + + "dureResultResponseBU\n1org.apache.hadoop." + + "hbase.shaded.protobuf.generatedB\030RegionS" + + "erverStatusProtosH\001\210\001\001\240\001\001" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { @@ -9518,6 +11041,7 @@ public final class RegionServerStatusProtos { .internalBuildGeneratedFileFrom(descriptorData, new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor[] { org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.getDescriptor(), + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.getDescriptor(), org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.getDescriptor(), }, assigner); internal_static_hbase_pb_RegionServerStartupRequest_descriptor = @@ -9586,7 +11110,20 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ReportRegionStateTransitionResponse_descriptor, new java.lang.String[] { "ErrorMessage", }); + internal_static_hbase_pb_SplitTableRegionRequest_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_hbase_pb_SplitTableRegionRequest_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_SplitTableRegionRequest_descriptor, + new java.lang.String[] { "RegionInfo", "SplitRow", "NonceGroup", "Nonce", }); + internal_static_hbase_pb_SplitTableRegionResponse_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_hbase_pb_SplitTableRegionResponse_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_SplitTableRegionResponse_descriptor, + new java.lang.String[] { "ProcId", }); org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.getDescriptor(); + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.getDescriptor(); org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.getDescriptor(); } diff --git a/hbase-protocol-shaded/src/main/protobuf/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/Admin.proto index e3e9c62ebf9..77cfcff832f 100644 --- a/hbase-protocol-shaded/src/main/protobuf/Admin.proto +++ b/hbase-protocol-shaded/src/main/protobuf/Admin.proto @@ -117,6 +117,18 @@ message CloseRegionResponse { required bool 
closed = 1; } +/** + * Closes the specified region and creates + * child regions. + */ +message CloseRegionForSplitRequest { + required RegionSpecifier region = 1; +} + +message CloseRegionForSplitResponse { + required bool closed = 1; +} + /** * Flushes the MemStore of the specified region. *

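(Editor's illustration, not part of the patch: a minimal caller-side sketch of the two messages above and the AdminService rpc added in the next hunk. The helper name requestParentClose and the adminStub/controller/parentRegionName parameters are assumptions for illustration; the shaded generated classes and HBaseProtos.RegionSpecifier are assumed imported.)

    // Illustration only: builds and sends the new close-for-split RPC for a
    // parent region, returning the RS's answer. A false result means the
    // region server rejected the close request.
    static boolean requestParentClose(
        final AdminService.BlockingInterface adminStub,
        final RpcController controller,
        final byte[] parentRegionName) throws ServiceException {
      CloseRegionForSplitRequest request = CloseRegionForSplitRequest.newBuilder()
          .setRegion(RegionSpecifier.newBuilder()
              .setType(RegionSpecifier.RegionSpecifierType.REGION_NAME)
              .setValue(UnsafeByteOperations.unsafeWrap(parentRegionName)))
          .build();
      return adminStub.closeRegionForSplit(controller, request).getClosed();
    }
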
@@ -274,6 +286,9 @@ service AdminService { rpc CloseRegion(CloseRegionRequest) returns(CloseRegionResponse); + rpc CloseRegionForSplit(CloseRegionForSplitRequest) + returns(CloseRegionForSplitResponse); + rpc FlushRegion(FlushRegionRequest) returns(FlushRegionResponse); diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto index 0e0b385f9fc..f34a7ff3394 100644 --- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto @@ -277,6 +277,27 @@ message DispatchMergingRegionsStateData { optional bool forcible = 4; } +enum SplitTableRegionState { + SPLIT_TABLE_REGION_PREPARE = 1; + SPLIT_TABLE_REGION_PRE_OPERATION = 2; + SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE = 3; + SPLIT_TABLE_REGION_CLOSED_PARENT_REGION = 4; + SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS = 5; + SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR = 6; + SPLIT_TABLE_REGION_UPDATE_META = 7; + SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_PONR = 8; + SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS = 9; + SPLIT_TABLE_REGION_POST_OPERATION = 10; +} + +message SplitTableRegionStateData { + required UserInformation user_info = 1; + required TableName table_name = 2; + required RegionInfo parent_region_info = 3; + optional bytes split_row = 4; + repeated RegionInfo child_region_info = 5; +} + message ServerCrashStateData { required ServerName server_name = 1; optional bool distributed_log_replay = 2; diff --git a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto index 60cf77a0a4d..1c373ee649e 100644 --- a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto +++ b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto @@ -26,6 +26,7 @@ option java_generate_equals_and_hash = true; option optimize_for = SPEED; import "HBase.proto"; +import "Master.proto"; import "ClusterStatus.proto"; message RegionServerStartupRequest { @@ -126,6 +127,20 @@ message ReportRegionStateTransitionResponse { optional string error_message = 1; } +/** + * Splits the specified region. + */ +message SplitTableRegionRequest { + required RegionInfo region_info = 1; + required bytes split_row = 2; + optional uint64 nonce_group = 3 [default = 0]; + optional uint64 nonce = 4 [default = 0]; +} + +message SplitTableRegionResponse { + optional uint64 proc_id = 1; +} + service RegionServerStatusService { /** Called when a region server first starts. 
*/ rpc RegionServerStartup(RegionServerStartupRequest) @@ -155,4 +170,16 @@ service RegionServerStatusService { */ rpc ReportRegionStateTransition(ReportRegionStateTransitionRequest) returns(ReportRegionStateTransitionResponse); + + /** + * Split region + */ + rpc SplitRegion(SplitTableRegionRequest) + returns(SplitTableRegionResponse); + + /** + * Get procedure result + */ + rpc getProcedureResult(GetProcedureResultRequest) + returns(GetProcedureResultResponse); } diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java index ed07c5c7dc2..70167bbe131 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.MasterSwitchType; +import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.coprocessor.CoprocessorService; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; @@ -80,7 +81,6 @@ import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdmi import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest; import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse; - @InterfaceAudience.Private public class RSGroupAdminEndpoint extends RSGroupAdminService implements CoprocessorService, Coprocessor, MasterObserver { @@ -1098,4 +1098,42 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService public void postListProcedures(ObserverContext ctx, List procInfoList) throws IOException { } + + @Override + public void preSplitRegion( + final ObserverContext c, + final TableName tableName, + final byte[] splitRow) throws IOException { + } + + @Override + public void preSplitRegionAction( + final ObserverContext c, + final TableName tableName, + final byte[] splitRow) throws IOException { + } + + @Override + public void postCompletedSplitRegionAction( + final ObserverContext c, + final HRegionInfo regionInfoA, + final HRegionInfo regionInfoB) throws IOException { + } + + @Override + public void preSplitRegionBeforePONRAction( + final ObserverContext ctx, + final byte[] splitKey, + final List metaEntries) throws IOException { + } + + @Override + public void preSplitRegionAfterPONRAction( + final ObserverContext ctx) throws IOException { + } + + @Override + public void preRollBackSplitRegionAction( + final ObserverContext ctx) throws IOException { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java index 15c829934cd..21381e8c4ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java @@ -37,11 +37,13 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.MasterSwitchType; +import 
org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; +import org.apache.hadoop.hbase.regionserver.Region; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving @@ -794,4 +796,42 @@ public class BaseMasterAndRegionObserver extends BaseRegionObserver public void preRemoveRSGroup(ObserverContext ctx, String name) throws IOException { } + + @Override + public void preSplitRegion( + final ObserverContext c, + final TableName tableName, + final byte[] splitRow) throws IOException { + } + + @Override + public void preSplitRegionAction( + final ObserverContext c, + final TableName tableName, + final byte[] splitRow) throws IOException { + } + + @Override + public void postCompletedSplitRegionAction( + ObserverContext c, + final HRegionInfo regionInfoA, + final HRegionInfo regionInfoB) throws IOException { + } + + @Override + public void preSplitRegionBeforePONRAction( + final ObserverContext ctx, + final byte[] splitKey, + final List metaEntries) throws IOException { + } + + @Override + public void preSplitRegionAfterPONRAction(final ObserverContext ctx) + throws IOException { + } + + @Override + public void preRollBackSplitRegionAction(final ObserverContext ctx) + throws IOException { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java index 3a07315abf1..4d24a84acc0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java @@ -37,11 +37,13 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.MasterSwitchType; +import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; +import org.apache.hadoop.hbase.regionserver.Region; @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.CONFIG}) @InterfaceStability.Evolving @@ -816,6 +818,44 @@ public class BaseMasterObserver implements MasterObserver { final boolean newValue, final MasterSwitchType switchType) throws IOException { } + @Override + public void preSplitRegion( + final ObserverContext c, + final TableName tableName, + final byte[] splitRow) throws IOException { + } + + @Override + public void preSplitRegionAction( + final ObserverContext c, + final TableName tableName, + final byte[] splitRow) throws IOException { + } + + @Override + public void postCompletedSplitRegionAction( + final ObserverContext c, + final HRegionInfo regionInfoA, + final HRegionInfo regionInfoB) throws IOException { + } + + @Override + public void preSplitRegionBeforePONRAction( + final ObserverContext ctx, + 
final byte[] splitKey, + final List metaEntries) throws IOException { + } + + @Override + public void preSplitRegionAfterPONRAction( + final ObserverContext ctx) throws IOException { + } + + @Override + public void preRollBackSplitRegionAction(final ObserverContext ctx) + throws IOException { + } + @Override public void preBalance(ObserverContext ctx) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java index fd795d65a88..e347adfb347 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.MasterSwitchType; +import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; @@ -1132,6 +1133,76 @@ public interface MasterObserver extends Coprocessor { void postSetSplitOrMergeEnabled(final ObserverContext ctx, final boolean newValue, final MasterSwitchType switchType) throws IOException; + /** + * Called before the split region procedure is called. + * @param c the environment to interact with the framework and master + * @param tableName the table where the region belongs to + * @param splitRow split point + * @throws IOException if an error occurred on the coprocessor + */ + void preSplitRegion( + final ObserverContext c, + final TableName tableName, + final byte[] splitRow) + throws IOException; + + /** + * Called before the region is split. + * @param c the environment to interact with the framework and master + * @param tableName the table where the region belongs to + * @param splitRow split point + * @throws IOException if an error occurred on the coprocessor + */ + void preSplitRegionAction( + final ObserverContext c, + final TableName tableName, + final byte[] splitRow) + throws IOException; + + /** + * Called after the region is split. + * @param c the environment to interact with the framework and master + * @param regionInfoA the left daughter region + * @param regionInfoB the right daughter region + * @throws IOException if an error occurred on the coprocessor + */ + void postCompletedSplitRegionAction( + final ObserverContext c, + final HRegionInfo regionInfoA, + final HRegionInfo regionInfoB) throws IOException; + + /** + * This will be called before PONR step as part of split transaction. Calling + * {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} rollback the split + * @param ctx the environment to interact with the framework and master + * @param splitKey + * @param metaEntries + * @throws IOException + */ + void preSplitRegionBeforePONRAction( + final ObserverContext ctx, + final byte[] splitKey, + final List metaEntries) throws IOException; + + + /** + * This will be called after PONR step as part of split transaction + * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no + * effect in this hook. 
+ * @param ctx the environment to interact with the framework and master + * @throws IOException + */ + void preSplitRegionAfterPONRAction(final ObserverContext ctx) + throws IOException; + + /** + * This will be called before the roll back of the split region is completed + * @param ctx the environment to interact with the framework and master + * @throws IOException + */ + void preRollBackSplitRegionAction(final ObserverContext ctx) + throws IOException; + /** * Called prior to modifying the flag used to enable/disable region balancing. * @param ctx the coprocessor instance's environment diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index 4c946440963..aaa3b6af53d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -359,7 +359,10 @@ public interface RegionObserver extends Coprocessor { * @param c the environment provided by the region server * (e.getRegion() returns the parent region) * @throws IOException if an error occurred on the coprocessor + * + * Note: the logic moves to Master; it is unused in RS */ + @Deprecated void preSplit(final ObserverContext c, byte[] splitRow) throws IOException; @@ -383,32 +386,43 @@ public interface RegionObserver extends Coprocessor { * @param splitKey * @param metaEntries * @throws IOException - */ + * + * Note: the logic moves to Master; it is unused in RS + */ + @Deprecated void preSplitBeforePONR(final ObserverContext ctx, byte[] splitKey, List metaEntries) throws IOException; - /** * This will be called after PONR step as part of split transaction * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no * effect in this hook. 
* @param ctx * @throws IOException - */ + * + * Note: the logic moves to Master; it is unused in RS + */ + @Deprecated void preSplitAfterPONR(final ObserverContext ctx) throws IOException; /** * This will be called before the roll back of the split region is completed * @param ctx * @throws IOException - */ + * + * Note: the logic moves to Master; it is unused in RS + */ + @Deprecated void preRollBackSplit(final ObserverContext ctx) throws IOException; /** * This will be called after the roll back of the split region is completed * @param ctx * @throws IOException - */ + * + * Note: the logic moves to Master; it is unused in RS + */ + @Deprecated void postRollBackSplit(final ObserverContext ctx) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 82ae03fbe88..c8c25f1c1d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -61,7 +61,6 @@ import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.RegionStateListener; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -2432,6 +2431,39 @@ public class AssignmentManager { return null; } + public void assignDaughterRegions( + final HRegionInfo parentHRI, + final HRegionInfo daughterAHRI, + final HRegionInfo daughterBHRI) throws InterruptedException, IOException { + //Offline the parent region + regionOffline(parentHRI, State.SPLIT); + + //Set daughter regions to offline + regionStates.prepareAssignDaughters(daughterAHRI, daughterBHRI); + + // Assign daughter regions + invokeAssign(daughterAHRI); + invokeAssign(daughterBHRI); + + Callable splitReplicasCallable = new Callable() { + @Override + public Object call() { + doSplittingOfReplicas(parentHRI, daughterAHRI, daughterBHRI); + return null; + } + }; + threadPoolExecutorService.submit(splitReplicasCallable); + + // wait for assignment completion + ArrayList regionAssignSet = new ArrayList(2); + regionAssignSet.add(daughterAHRI); + regionAssignSet.add(daughterBHRI); + while (!waitForAssignment(regionAssignSet, true, regionAssignSet.size(), + Long.MAX_VALUE)) { + LOG.debug("some user regions are still in transition: " + regionAssignSet); + } + } + private String onRegionSplit(final RegionState current, final HRegionInfo hri, final ServerName serverName, final RegionStateTransition transition) { // The region must be splitting on this server, and the daughters must be in @@ -2866,7 +2898,7 @@ public class AssignmentManager { * (d) Other scenarios should be handled similarly as for * region open/close */ - protected String onRegionTransition(final ServerName serverName, + public String onRegionTransition(final ServerName serverName, final RegionStateTransition transition) { TransitionCode code = transition.getTransitionCode(); HRegionInfo hri = HRegionInfo.convert(transition.getRegionInfo(0)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index eac2fa22f8d..beec198fff9 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -111,6 +111,7 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure; import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure; import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch; +import org.apache.hadoop.hbase.master.procedure.SplitTableRegionProcedure; import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.mob.MobConstants; @@ -1352,6 +1353,30 @@ public class HMaster extends HRegionServer implements MasterServices { return procId; } + @Override + public long splitRegion( + final HRegionInfo regionInfo, + final byte[] splitRow, + final long nonceGroup, + final long nonce) throws IOException { + checkInitialized(); + + if (cpHost != null) { + cpHost.preSplitRegion(regionInfo.getTable(), splitRow); + } + + LOG.info(getClientIdAuditPrefix() + " Split region " + regionInfo); + + // Execute the operation asynchronously + long procId = this.procedureExecutor.submitProcedure( + new SplitTableRegionProcedure( + procedureExecutor.getEnvironment(), regionInfo.getTable(), regionInfo, splitRow), + nonceGroup, + nonce); + + return procId; + } + void move(final byte[] encodedRegionName, final byte[] destServerName) throws HBaseIOException { RegionState regionState = assignmentManager.getRegionStates(). diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java index 9eec0dd9fa9..d0ac7654470 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.MasterSwitchType; +import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.CoprocessorService; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; @@ -833,6 +834,114 @@ public class MasterCoprocessorHost }); } + /** + * Invoked just before calling the split region procedure + * @param tableName the table where the region belongs to + * @param splitRow the split point + * @throws IOException + */ + public void preSplitRegion( + final TableName tableName, + final byte[] splitRow) throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, ObserverContext ctx) + throws IOException { + oserver.preSplitRegion(ctx, tableName, splitRow); + } + }); + } + + /** + * Invoked just before a split + * @param tableName the table where the region belongs to + * @param splitRow the split point + * @param user the user + * @throws IOException + */ + public void preSplitRegionAction( + final TableName tableName, + final byte[] splitRow, + final User user) throws IOException { + execOperation(coprocessors.isEmpty() ? 
null : new CoprocessorOperation(user) { + @Override + public void call(MasterObserver oserver, ObserverContext ctx) + throws IOException { + oserver.preSplitRegionAction(ctx, tableName, splitRow); + } + }); + } + + /** + * Invoked just after a split + * @param regionInfoA the new left-hand daughter region + * @param regionInfoB the new right-hand daughter region + * @param user the user + * @throws IOException + */ + public void postCompletedSplitRegionAction( + final HRegionInfo regionInfoA, + final HRegionInfo regionInfoB, + final User user) throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + @Override + public void call(MasterObserver oserver, ObserverContext ctx) + throws IOException { + oserver.postCompletedSplitRegionAction(ctx, regionInfoA, regionInfoB); + } + }); + } + + /** + * This will be called before PONR step as part of split table region procedure. + * @param splitKey + * @param metaEntries + * @param user the user + * @throws IOException + */ + public boolean preSplitBeforePONRAction( + final byte[] splitKey, + final List metaEntries, + final User user) throws IOException { + return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + @Override + public void call(MasterObserver oserver, ObserverContext ctx) + throws IOException { + oserver.preSplitRegionBeforePONRAction(ctx, splitKey, metaEntries); + } + }); + } + + /** + * This will be called after PONR step as part of split table region procedure. + * @param user the user + * @throws IOException + */ + public void preSplitAfterPONRAction(final User user) throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + @Override + public void call(MasterObserver oserver, ObserverContext ctx) + throws IOException { + oserver.preSplitRegionAfterPONRAction(ctx); + } + }); + } + + /** + * Invoked just before the rollback of a failed split is started + * @param user the user + * @throws IOException + */ + public void preRollBackSplitAction(final User user) throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + @Override + public void call(MasterObserver oserver, ObserverContext ctx) + throws IOException { + oserver.preRollBackSplitRegionAction(ctx); + } + }); + } + public boolean preBalanceSwitch(final boolean b) throws IOException { return execOperationWithResult(b, coprocessors.isEmpty() ? 
null : new CoprocessorOperationWithResult() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 7cc58aada9c..94b5f56ad64 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -83,6 +83,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse; import org.apache.hadoop.hbase.regionserver.RSRpcServices; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.access.AccessController; @@ -549,6 +551,22 @@ public class MasterRpcServices extends RSRpcServices } } + @Override + public SplitTableRegionResponse splitRegion( + final RpcController controller, + final SplitTableRegionRequest request) throws ServiceException { + try { + long procId = master.splitRegion( + HRegionInfo.convert(request.getRegionInfo()), + request.getSplitRow().toByteArray(), + request.getNonceGroup(), + request.getNonce()); + return SplitTableRegionResponse.newBuilder().setProcId(procId).build(); + } catch (IOException ie) { + throw new ServiceException(ie); + } + } + @Override public ClientProtos.CoprocessorServiceResponse execMasterService(final RpcController controller, final ClientProtos.CoprocessorServiceRequest request) throws ServiceException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 9bdcf76a61d..fa1c33d79cf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -264,6 +264,21 @@ public interface MasterServices extends Server { final long nonce) throws IOException; + /** + * Split a region. + * @param regionInfo region to split + * @param splitRow split point + * @param nonceGroup used to detect duplicate + * @param nonce used to detect duplicate + * @return procedure Id + * @throws IOException + */ + public long splitRegion( + final HRegionInfo regionInfo, + final byte [] splitRow, + final long nonceGroup, + final long nonce) throws IOException; + /** * @return Return table descriptors implementation. 
*/ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java index ba08a059817..f69925ac377 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; @@ -873,6 +874,17 @@ public class RegionStates { return regions == null ? false : regions.contains(hri); } + public void prepareAssignDaughters(HRegionInfo a, HRegionInfo b) { + synchronized (this) { + if (isRegionInState(a, State.SPLITTING_NEW)) { + updateRegionState(a, State.OFFLINE, null); + } + if (isRegionInState(b, State.SPLITTING_NEW)) { + updateRegionState(b, State.OFFLINE, null); + } + } + } + void splitRegion(HRegionInfo p, HRegionInfo a, HRegionInfo b, ServerName sn) throws IOException { @@ -1032,7 +1044,7 @@ public class RegionStates { return result; } - protected RegionState getRegionState(final HRegionInfo hri) { + public RegionState getRegionState(final HRegionInfo hri) { return getRegionState(hri.getEncodedName()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index 437c787727f..278030fb738 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -80,7 +80,6 @@ import org.apache.zookeeper.KeeperException; import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString; import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; @@ -828,6 +827,31 @@ public class ServerManager { return sendRegionClose(server, region, null); } + /** + * Sends an CLOSE RPC to the specified server to close the specified region for SPLIT. + *

+ * A region server could reject the close request because it either does not + * have the specified region or the region is being split. + * @param server server to close a region + * @param regionToClose the info of the region to close + * @throws IOException + */ + public boolean sendRegionCloseForSplit( + final ServerName server, + final HRegionInfo regionToClose) throws IOException { + if (server == null) { + throw new NullPointerException("Passed server is null"); + } + AdminService.BlockingInterface admin = getRsAdmin(server); + if (admin == null) { + throw new IOException("Attempting to send CLOSE For Split RPC to server " + + server.toString() + " for region " + regionToClose.getRegionNameAsString() + + " failed because no RPC connection found to this server"); + } + HBaseRpcController controller = newRpcController(); + return ProtobufUtil.closeRegionForSplit(controller, admin, server, regionToClose); + } + /** * Sends a WARMUP RPC to the specified server to warmup the specified region. *

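(Editor's sketch, not the procedure's actual implementation: how a master-side step could drive the sendRegionCloseForSplit method added above. The method name closeParentForSplit is hypothetical; the MasterProcedureEnv accessors are the standard ones, and env/parentHRI are assumed to be in scope.)

    // Sketch only: locate the server hosting the parent region and ask it to
    // close the region in preparation for the split.
    private void closeParentForSplit(final MasterProcedureEnv env, final HRegionInfo parentHRI)
        throws IOException {
      ServerName parentServer = env.getMasterServices().getAssignmentManager()
          .getRegionStates().getRegionServerOfRegion(parentHRI);
      boolean closed = env.getMasterServices().getServerManager()
          .sendRegionCloseForSplit(parentServer, parentHRI);
      if (!closed) {
        // The RS may refuse, e.g. if it no longer hosts the parent region.
        throw new IOException("Parent region " + parentHRI.getEncodedName()
            + " was not closed for split by " + parentServer);
      }
    }
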
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java new file mode 100644 index 00000000000..61f7601222e --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java @@ -0,0 +1,821 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.io.InputStream; +import java.io.InterruptedIOException; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.master.MasterFileSystem; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.RegionStates; +import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; +import org.apache.hadoop.hbase.regionserver.HStore; +import org.apache.hadoop.hbase.regionserver.StoreFile; +import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.FSUtils; 
+import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.Threads; + +import com.google.common.annotations.VisibleForTesting; + +/** + * The procedure to split a region in a table. + */ +@InterfaceAudience.Private +public class SplitTableRegionProcedure +extends AbstractStateMachineTableProcedure { + private static final Log LOG = LogFactory.getLog(SplitTableRegionProcedure.class); + + private Boolean traceEnabled; + private User user; + private TableName tableName; + + /* + * Region to split + */ + private HRegionInfo parentHRI; + private HRegionInfo daughter_1_HRI; + private HRegionInfo daughter_2_HRI; + + /* + * Row to split around + */ + private byte [] splitRow; + + public SplitTableRegionProcedure() { + this.traceEnabled = null; + } + + public SplitTableRegionProcedure( + final MasterProcedureEnv env, + final TableName tableName, + final HRegionInfo parentHRI, + final byte [] splitRow) throws IOException { + this.traceEnabled = null; + this.tableName = tableName; + this.parentHRI = parentHRI; + this.splitRow = splitRow; + + this.user = env.getRequestUser(); + this.setOwner(this.user.getShortName()); + } + + @Override + protected Flow executeFromState( + final MasterProcedureEnv env, + final SplitTableRegionState state) throws InterruptedException { + if (isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + + try { + switch (state) { + case SPLIT_TABLE_REGION_PREPARE: + if (prepareSplitRegion(env)) { + setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_PRE_OPERATION); + break; + } else { + assert isFailed() : "split region should have an exception here"; + return Flow.NO_MORE_STATE; + } + case SPLIT_TABLE_REGION_PRE_OPERATION: + preSplitRegion(env); + setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE); + break; + case SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE: + setRegionStateToSplitting(env); + setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_CLOSED_PARENT_REGION); + break; + case SPLIT_TABLE_REGION_CLOSED_PARENT_REGION: + closeParentRegionForSplit(env); + setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS); + break; + case SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS: + createDaughterRegions(env); + setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR); + break; + case SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR: + preSplitRegionBeforePONR(env); + setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_UPDATE_META); + break; + case SPLIT_TABLE_REGION_UPDATE_META: + // This is the point of no return. Adding subsequent edits to .META. as we + // do below when we do the daughter opens adding each to .META. can fail in + // various interesting ways the most interesting of which is a timeout + // BUT the edits all go through (See HBASE-3872). IF we reach the PONR + // then subsequent failures need to crash out this region server; the + // server shutdown processing should be able to fix-up the incomplete split. + // The offlined parent will have the daughters as extra columns. If + // we leave the daughter regions in place and do not remove them when we + // crash out, then they will have their references to the parent in place + // still and the server shutdown fixup of .META. will point to these + // regions. + // We should add PONR JournalEntry before offlineParentInMeta,so even if + // OfflineParentInMeta timeout,this will cause regionserver exit,and then + // master ServerShutdownHandler will fix daughter & avoid data loss. (See + // HBase-4562). 
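The point of no return described above is SPLIT_TABLE_REGION_UPDATE_META: once the daughters are added to hbase:meta, later failures are only retried, never rolled back (see isRollbackSupported below). A minimal standalone sketch of that retry-versus-rollback rule, using hypothetical state names rather than the generated protobuf enum:

    import java.util.EnumSet;

    public class SplitRollbackRule {
      // Hypothetical, condensed mirror of the procedure's rule: every state at or after
      // the hbase:meta update can only be retried; everything before it can be rolled back.
      enum State {
        PREPARE, PRE_OPERATION, SET_SPLITTING, CLOSE_PARENT, CREATE_DAUGHTERS,
        PRE_PONR, UPDATE_META, POST_PONR, OPEN_DAUGHTERS, POST_OPERATION
      }

      static final EnumSet<State> PAST_PONR =
          EnumSet.of(State.UPDATE_META, State.POST_PONR, State.OPEN_DAUGHTERS, State.POST_OPERATION);

      static boolean isRollbackSupported(State state) {
        return !PAST_PONR.contains(state);
      }

      public static void main(String[] args) {
        System.out.println(isRollbackSupported(State.CREATE_DAUGHTERS)); // true: safe to roll back
        System.out.println(isRollbackSupported(State.UPDATE_META));      // false: retry only
      }
    }
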
+ updateMetaForDaughterRegions(env); + setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_PONR); + break; + case SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_PONR: + preSplitRegionAfterPONR(env); + setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS); + break; + case SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS: + openDaughterRegions(env); + setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_POST_OPERATION); + break; + case SPLIT_TABLE_REGION_POST_OPERATION: + postSplitRegion(env); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + } catch (IOException e) { + String msg = "Error trying to split region " + parentHRI.getEncodedName() + " in the table " + + tableName + " (in state=" + state + ")"; + if (!isRollbackSupported(state)) { + // We reach a state that cannot be rolled back. We just need to keep retry. + LOG.warn(msg, e); + } else { + LOG.error(msg, e); + setFailure("master-split-region", e); + } + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final SplitTableRegionState state) + throws IOException, InterruptedException { + if (isTraceEnabled()) { + LOG.trace(this + " rollback state=" + state); + } + + try { + switch (state) { + case SPLIT_TABLE_REGION_POST_OPERATION: + case SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS: + case SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_PONR: + case SPLIT_TABLE_REGION_UPDATE_META: + // PONR + throw new UnsupportedOperationException(this + " unhandled state=" + state); + case SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR: + break; + case SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS: + // Doing nothing, as re-open parent region would clean up daughter region directories. + break; + case SPLIT_TABLE_REGION_CLOSED_PARENT_REGION: + openParentRegion(env); + break; + case SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE: + setRegionStateToRevertSplitting(env); + break; + case SPLIT_TABLE_REGION_PRE_OPERATION: + preSplitRegionRollback(env); + break; + case SPLIT_TABLE_REGION_PREPARE: + break; // nothing to do + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + } catch (IOException e) { + // This will be retried. Unless there is a bug in the code, + // this should be just a "temporary error" (e.g. network down) + LOG.warn("Failed rollback attempt step " + state + " for splitting the region " + + parentHRI.getEncodedName() + " in table " + tableName, e); + throw e; + } + } + + /* + * Check whether we are in the state that can be rollback + */ + @Override + protected boolean isRollbackSupported(final SplitTableRegionState state) { + switch (state) { + case SPLIT_TABLE_REGION_POST_OPERATION: + case SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS: + case SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_PONR: + case SPLIT_TABLE_REGION_UPDATE_META: + // It is not safe to rollback if we reach to these states. 
+ return false; + default: + break; + } + return true; + } + + @Override + protected SplitTableRegionState getState(final int stateId) { + return SplitTableRegionState.valueOf(stateId); + } + + @Override + protected int getStateId(final SplitTableRegionState state) { + return state.getNumber(); + } + + @Override + protected SplitTableRegionState getInitialState() { + return SplitTableRegionState.SPLIT_TABLE_REGION_PREPARE; + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + MasterProcedureProtos.SplitTableRegionStateData.Builder splitTableRegionMsg = + MasterProcedureProtos.SplitTableRegionStateData.newBuilder() + .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user)) + .setTableName(ProtobufUtil.toProtoTableName(tableName)) + .setParentRegionInfo(HRegionInfo.convert(parentHRI)); + if (splitRow != null) { + splitTableRegionMsg.setSplitRow(UnsafeByteOperations.unsafeWrap(splitRow)); + } + if (daughter_1_HRI != null) { + splitTableRegionMsg.addChildRegionInfo(HRegionInfo.convert(daughter_1_HRI)); + } + if (daughter_2_HRI != null) { + splitTableRegionMsg.addChildRegionInfo(HRegionInfo.convert(daughter_2_HRI)); + } + splitTableRegionMsg.build().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + MasterProcedureProtos.SplitTableRegionStateData splitTableRegionsMsg = + MasterProcedureProtos.SplitTableRegionStateData.parseDelimitedFrom(stream); + user = MasterProcedureUtil.toUserInfo(splitTableRegionsMsg.getUserInfo()); + tableName = ProtobufUtil.toTableName(splitTableRegionsMsg.getTableName()); + parentHRI = HRegionInfo.convert(splitTableRegionsMsg.getParentRegionInfo()); + if (splitTableRegionsMsg.hasSplitRow()) { + splitRow = splitTableRegionsMsg.getSplitRow().toByteArray(); + } else { + splitRow = null; + } + if (splitTableRegionsMsg.getChildRegionInfoCount() == 0) { + daughter_1_HRI = daughter_2_HRI = null; + } else { + assert(splitTableRegionsMsg.getChildRegionInfoCount() == 2); + daughter_1_HRI = HRegionInfo.convert(splitTableRegionsMsg.getChildRegionInfoList().get(0)); + daughter_2_HRI = HRegionInfo.convert(splitTableRegionsMsg.getChildRegionInfoList().get(1)); + } + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (table="); + sb.append(tableName); + sb.append(" parent region="); + sb.append(parentHRI); + if (daughter_1_HRI != null) { + sb.append(" first daughter region="); + sb.append(daughter_1_HRI); + } + if (daughter_2_HRI != null) { + sb.append(" and second daughter region="); + sb.append(daughter_2_HRI); + } + sb.append(")"); + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (env.waitInitialized(this)) { + return false; + } + return !env.getProcedureQueue().waitRegions(this, getTableName(), parentHRI); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().wakeRegions(this, getTableName(), parentHRI); + } + + @Override + public TableName getTableName() { + return tableName; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.SPLIT; + } + + /** + * Prepare to Split region. 
+ * @param env MasterProcedureEnv + * @throws IOException + */ + @VisibleForTesting + public Boolean prepareSplitRegion(final MasterProcedureEnv env) throws IOException { + // Check whether the region is splittable + RegionState state = getParentRegionState(env); + if (state.isClosing() || + state.isClosed() || + state.isSplittingOrSplitOnServer(state.getServerName())) { + setFailure( + "master-split-region", + new IOException("Split region " + parentHRI + " failed due to region is not splittable")); + return false; + } + // Split key can be null if this region is unsplittable; i.e. has refs. + if (this.splitRow == null || this.splitRow.length == 0) { + setFailure( + "master-split-region", + new IOException("Split region " + parentHRI + " failed due to invalid split point")); + return false; + } + + // Check splitRow. + byte [] startKey = parentHRI.getStartKey(); + byte [] endKey = parentHRI.getEndKey(); + if (Bytes.equals(startKey, splitRow) || + !this.parentHRI.containsRow(splitRow)) { + String msg = "Split row is not inside region key range or is equal to " + + "startkey: " + Bytes.toStringBinary(this.splitRow); + LOG.warn(msg); + setFailure("master-split-region", new IOException(msg)); + return false; + } + + long rid = getDaughterRegionIdTimestamp(parentHRI); + this.daughter_1_HRI = new HRegionInfo(tableName, startKey, this.splitRow, false, rid); + this.daughter_2_HRI = new HRegionInfo(tableName, this.splitRow, endKey, false, rid); + + return true; + } + + /** + * Action before splitting region in a table. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void preSplitRegion(final MasterProcedureEnv env) + throws IOException, InterruptedException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + cpHost.preSplitRegionAction(getTableName(), splitRow, user); + } + } + + /** + * Action during rollback a pre split table region. 
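The prepare step above fails the procedure early when the split row is missing, equal to the parent's start key, or outside the parent's key range; only then are the two daughter HRegionInfo instances built with a shared region id. A self-contained sketch of that key-range check, with plain byte-array comparison standing in for HRegionInfo.containsRow (names are illustrative):

    import java.util.Arrays;

    public class SplitRowCheck {
      // An empty end key means "unbounded", as with the last region of a table.
      static boolean isValidSplitRow(byte[] startKey, byte[] endKey, byte[] splitRow) {
        if (splitRow == null || splitRow.length == 0) {
          return false;                                   // no usable split point
        }
        if (Arrays.equals(startKey, splitRow)) {
          return false;                                   // first daughter would be empty
        }
        if (compare(splitRow, startKey) < 0) {
          return false;                                   // below the region
        }
        return endKey.length == 0 || compare(splitRow, endKey) < 0;
      }

      // Unsigned lexicographic comparison, the ordering HBase row keys use.
      static int compare(byte[] a, byte[] b) {
        int len = Math.min(a.length, b.length);
        for (int i = 0; i < len; i++) {
          int diff = (a[i] & 0xff) - (b[i] & 0xff);
          if (diff != 0) {
            return diff;
          }
        }
        return a.length - b.length;
      }

      public static void main(String[] args) {
        byte[] start = "aaa".getBytes();
        byte[] end = "zzz".getBytes();
        System.out.println(isValidSplitRow(start, end, "mmm".getBytes()));  // true
        System.out.println(isValidSplitRow(start, end, "aaa".getBytes()));  // false: equals start key
        System.out.println(isValidSplitRow(start, end, "zzzz".getBytes())); // false: past the end key
      }
    }
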
+ * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + */ + private void preSplitRegionRollback(final MasterProcedureEnv env) throws IOException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + cpHost.preRollBackSplitAction(user); + } + } + + /** + * Set the parent region state to SPLITTING state + * @param env MasterProcedureEnv + * @throws IOException + */ + @VisibleForTesting + public void setRegionStateToSplitting(final MasterProcedureEnv env) throws IOException { + RegionStateTransition.Builder transition = RegionStateTransition.newBuilder(); + transition.setTransitionCode(TransitionCode.READY_TO_SPLIT); + transition.addRegionInfo(HRegionInfo.convert(parentHRI)); + transition.addRegionInfo(HRegionInfo.convert(daughter_1_HRI)); + transition.addRegionInfo(HRegionInfo.convert(daughter_2_HRI)); + if (env.getMasterServices().getAssignmentManager().onRegionTransition( + getParentRegionState(env).getServerName(), transition.build()) != null) { + throw new IOException("Failed to update region state to SPLITTING for " + + parentHRI.getRegionNameAsString()); + } + } + + /** + * Rollback the region state change + * @param env MasterProcedureEnv + * @throws IOException + */ + private void setRegionStateToRevertSplitting(final MasterProcedureEnv env) throws IOException { + RegionStateTransition.Builder transition = RegionStateTransition.newBuilder(); + transition.setTransitionCode(TransitionCode.SPLIT_REVERTED); + transition.addRegionInfo(HRegionInfo.convert(parentHRI)); + transition.addRegionInfo(HRegionInfo.convert(daughter_1_HRI)); + transition.addRegionInfo(HRegionInfo.convert(daughter_2_HRI)); + if (env.getMasterServices().getAssignmentManager().onRegionTransition( + getParentRegionState(env).getServerName(), transition.build()) != null) { + throw new IOException("Failed to update region state for " + + parentHRI.getRegionNameAsString() + " as part of operation for reverting split"); + } + } + + /** + * RPC to region server that host the parent region, ask for close the parent regions and + * creating daughter regions + * @param env MasterProcedureEnv + * @throws IOException + */ + @VisibleForTesting + public void closeParentRegionForSplit(final MasterProcedureEnv env) throws IOException { + Boolean success = env.getMasterServices().getServerManager().sendRegionCloseForSplit( + getParentRegionState(env).getServerName(), parentHRI); + if (!success) { + throw new IOException("Close parent region " + parentHRI + " for splitting failed." 
+ + " Check region server log for more details"); + } + } + + /** + * Rollback close parent region + * @param env MasterProcedureEnv + **/ + private void openParentRegion(final MasterProcedureEnv env) throws IOException { + // Check whether the region is closed; if so, open it in the same server + RegionState state = getParentRegionState(env); + if (state.isClosing() || state.isClosed()) { + env.getMasterServices().getServerManager().sendRegionOpen( + getParentRegionState(env).getServerName(), + parentHRI, + ServerName.EMPTY_SERVER_LIST); + } + } + + /** + * Create daughter regions + * @param env MasterProcedureEnv + * @throws IOException + */ + @VisibleForTesting + public void createDaughterRegions(final MasterProcedureEnv env) throws IOException { + final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + final Path tabledir = FSUtils.getTableDir(mfs.getRootDir(), parentHRI.getTable()); + final FileSystem fs = mfs.getFileSystem(); + HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem( + env.getMasterConfiguration(), fs, tabledir, parentHRI, false); + regionFs.createSplitsDir(); + + Pair expectedReferences = splitStoreFiles(env, regionFs); + + assertReferenceFileCount( + fs, expectedReferences.getFirst(), regionFs.getSplitsDir(daughter_1_HRI)); + //Move the files from the temporary .splits to the final /table/region directory + regionFs.commitDaughterRegion(daughter_1_HRI); + assertReferenceFileCount( + fs, + expectedReferences.getFirst(), + new Path(tabledir, daughter_1_HRI.getEncodedName())); + + assertReferenceFileCount( + fs, expectedReferences.getSecond(), regionFs.getSplitsDir(daughter_2_HRI)); + regionFs.commitDaughterRegion(daughter_2_HRI); + assertReferenceFileCount( + fs, + expectedReferences.getSecond(), + new Path(tabledir, daughter_2_HRI.getEncodedName())); + } + + /** + * Create Split directory + * @param env MasterProcedureEnv + * @throws IOException + */ + private Pair splitStoreFiles( + final MasterProcedureEnv env, + final HRegionFileSystem regionFs) throws IOException { + final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + final Configuration conf = env.getMasterConfiguration(); + + // The following code sets up a thread pool executor with as many slots as + // there's files to split. It then fires up everything, waits for + // completion and finally checks for any exception + // + // Note: splitStoreFiles creates daughter region dirs under the parent splits dir + // Nothing to unroll here if failure -- re-run createSplitsDir will + // clean this up. + int nbFiles = 0; + Collection storeFiles; + for (String family: regionFs.getFamilies()) { + storeFiles = regionFs.getStoreFiles(family); + if (storeFiles != null) { + nbFiles += storeFiles.size(); + } + } + if (nbFiles == 0) { + // no file needs to be splitted. + return new Pair(0,0); + } + // Default max #threads to use is the smaller of table's configured number of blocking store + // files or the available number of logical cores. + int defMaxThreads = Math.min( + conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT), + Runtime.getRuntime().availableProcessors()); + // Max #threads is the smaller of the number of storefiles or the default max determined above. 
+ int maxThreads = Math.min( + conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX, defMaxThreads), nbFiles); + LOG.info("Preparing to split " + nbFiles + " storefiles for region " + parentHRI + + " using " + maxThreads + " threads"); + ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool( + maxThreads, Threads.getNamedThreadFactory("StoreFileSplitter-%1$d")); + List>> futures = new ArrayList>> (nbFiles); + + // Split each store file. + for (String family: regionFs.getFamilies()) { + HColumnDescriptor hcd = + env.getMasterServices().getTableDescriptors().get(tableName).getFamily(family.getBytes()); + CacheConfig cacheConf = new CacheConfig(conf, hcd); + storeFiles = regionFs.getStoreFiles(family); + if (storeFiles != null) { + for (StoreFileInfo storeFileInfo: storeFiles) { + StoreFileSplitter sfs = new StoreFileSplitter( + regionFs, + family.getBytes(), + new StoreFile( + mfs.getFileSystem(), storeFileInfo, conf, cacheConf, hcd.getBloomFilterType())); + futures.add(threadPool.submit(sfs)); + } + } + } + // Shutdown the pool + threadPool.shutdown(); + + // Wait for all the tasks to finish + long fileSplitTimeout = conf.getLong("hbase.master.fileSplitTimeout", 30000); + try { + boolean stillRunning = !threadPool.awaitTermination(fileSplitTimeout, TimeUnit.MILLISECONDS); + if (stillRunning) { + threadPool.shutdownNow(); + // wait for the thread to shutdown completely. + while (!threadPool.isTerminated()) { + Thread.sleep(50); + } + throw new IOException("Took too long to split the" + + " files and create the references, aborting split"); + } + } catch (InterruptedException e) { + throw (InterruptedIOException)new InterruptedIOException().initCause(e); + } + + int daughterA = 0; + int daughterB = 0; + // Look for any exception + for (Future> future : futures) { + try { + Pair p = future.get(); + daughterA += p.getFirst() != null ? 1 : 0; + daughterB += p.getSecond() != null ? 1 : 0; + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } catch (ExecutionException e) { + throw new IOException(e); + } + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Split storefiles for region " + parentHRI + " Daughter A: " + daughterA + + " storefiles, Daughter B: " + daughterB + " storefiles."); + } + return new Pair(daughterA, daughterB); + } + + private void assertReferenceFileCount( + final FileSystem fs, + final int expectedReferenceFileCount, + final Path dir) + throws IOException { + if (expectedReferenceFileCount != 0 && + expectedReferenceFileCount != FSUtils.getRegionReferenceFileCount(fs, dir)) { + throw new IOException("Failing split. Expected reference file count isn't equal."); + } + } + + private Pair splitStoreFile( + final HRegionFileSystem regionFs, + final byte[] family, + final StoreFile sf) + throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Splitting started for store file: " + sf.getPath() + " for region: " + parentHRI); + } + + String familyName = Bytes.toString(family); + Path path_first = + regionFs.splitStoreFile(this.daughter_1_HRI, familyName, sf, this.splitRow, false, null); + Path path_second = + regionFs.splitStoreFile(this.daughter_2_HRI, familyName, sf, this.splitRow, true, null); + if (LOG.isDebugEnabled()) { + LOG.debug("Splitting complete for store file: " + sf.getPath() + " for region: " + parentHRI); + } + return new Pair(path_first, path_second); + } + + /** + * Utility class used to do the file splitting / reference writing + * in parallel instead of sequentially. 
+ */ + private class StoreFileSplitter implements Callable> { + private final HRegionFileSystem regionFs; + private final byte[] family; + private final StoreFile sf; + + /** + * Constructor that takes what it needs to split + * @param regionFs the file system + * @param family Family that contains the store file + * @param sf which file + */ + public StoreFileSplitter( + final HRegionFileSystem regionFs, + final byte[] family, + final StoreFile sf) { + this.regionFs = regionFs; + this.sf = sf; + this.family = family; + } + + public Pair call() throws IOException { + return splitStoreFile(regionFs, family, sf); + } + } + + /** + * Post split region actions before the Point-of-No-Return step + * @param env MasterProcedureEnv + **/ + private void preSplitRegionBeforePONR(final MasterProcedureEnv env) + throws IOException, InterruptedException { + final List metaEntries = new ArrayList(); + boolean ret = false; + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + ret = cpHost.preSplitBeforePONRAction(splitRow, metaEntries, user); + + if (ret) { + throw new IOException("Coprocessor bypassing region " + + parentHRI.getRegionNameAsString() + " split."); + } + try { + for (Mutation p : metaEntries) { + HRegionInfo.parseRegionName(p.getRow()); + } + } catch (IOException e) { + LOG.error("Row key of mutation from coprossor is not parsable as region name." + + "Mutations from coprocessor should only for hbase:meta table."); + throw e; + } + } + } + + /** + * Add daughter regions to META + * @param env MasterProcedureEnv + * @throws IOException + */ + private void updateMetaForDaughterRegions(final MasterProcedureEnv env) throws IOException { + RegionStateTransition.Builder transition = RegionStateTransition.newBuilder(); + transition.setTransitionCode(TransitionCode.SPLIT_PONR); + transition.addRegionInfo(HRegionInfo.convert(parentHRI)); + transition.addRegionInfo(HRegionInfo.convert(daughter_1_HRI)); + transition.addRegionInfo(HRegionInfo.convert(daughter_2_HRI)); + if (env.getMasterServices().getAssignmentManager().onRegionTransition( + getParentRegionState(env).getServerName(), transition.build()) != null) { + throw new IOException("Failed to update meta to add daughter regions in split region " + + parentHRI.getRegionNameAsString()); + } + } + + /** + * Pre split region actions after the Point-of-No-Return step + * @param env MasterProcedureEnv + **/ + private void preSplitRegionAfterPONR(final MasterProcedureEnv env) + throws IOException, InterruptedException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + cpHost.preSplitAfterPONRAction(user); + } + } + + /** + * Assign daughter regions + * @param env MasterProcedureEnv + * @throws IOException + * @throws InterruptedException + **/ + private void openDaughterRegions( + final MasterProcedureEnv env) throws IOException, InterruptedException { + env.getMasterServices().getAssignmentManager().assignDaughterRegions( + parentHRI, daughter_1_HRI, daughter_2_HRI); + } + + /** + * Post split region actions + * @param env MasterProcedureEnv + **/ + private void postSplitRegion(final MasterProcedureEnv env) + throws IOException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + cpHost.postCompletedSplitRegionAction(daughter_1_HRI, daughter_2_HRI, user); + } + } + + /** + * Calculate daughter regionid to use. + * @param hri Parent {@link HRegionInfo} + * @return Daughter region id (timestamp) to use. 
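The id chosen for the daughters has to sort after the parent's region id, otherwise the daughters would land in the wrong place in hbase:meta; the method below bumps the timestamp past the parent's id when the local clock is behind. A one-method sketch of that rule:

    public class DaughterRegionId {
      // Daughter region ids are timestamps; if the local clock is behind the parent's
      // region id (clock skew), use parentRegionId + 1 so the daughters still sort after it.
      static long daughterRegionId(long nowMillis, long parentRegionId) {
        return (nowMillis < parentRegionId) ? parentRegionId + 1 : nowMillis;
      }

      public static void main(String[] args) {
        System.out.println(daughterRegionId(1000L, 5000L)); // 5001: skewed clock, bump past the parent
        System.out.println(daughterRegionId(9000L, 5000L)); // 9000: normal case, current time wins
      }
    }
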
+ */ + private long getDaughterRegionIdTimestamp(final HRegionInfo hri) { + long rid = EnvironmentEdgeManager.currentTime(); + // Regionid is timestamp. Can't be less than that of parent else will insert + // at wrong location in hbase:meta (See HBASE-710). + if (rid < hri.getRegionId()) { + LOG.warn("Clock skew: parent regions id is " + hri.getRegionId() + + " but current time here is " + rid); + rid = hri.getRegionId() + 1; + } + return rid; + } + + /** + * Get parent region state + * @param env MasterProcedureEnv + * @return parent region state + */ + private RegionState getParentRegionState(final MasterProcedureEnv env) { + RegionStates regionStates = env.getMasterServices().getAssignmentManager().getRegionStates(); + RegionState state = regionStates.getRegionState(parentHRI); + if (state == null) { + LOG.warn("Split but not in region states: " + parentHRI); + state = regionStates.createRegionState(parentHRI); + } + return state; + } + + /** + * The procedure could be restarted from a different machine. If the variable is null, we need to + * retrieve it. + * @return traceEnabled + */ + private Boolean isTraceEnabled() { + if (traceEnabled == null) { + traceEnabled = LOG.isTraceEnabled(); + } + return traceEnabled; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 2c25efde4e9..99c389d8f21 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -6649,7 +6649,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi * @param hri Spec. for daughter region to open. * @throws IOException */ - HRegion createDaughterRegionFromSplits(final HRegionInfo hri) throws IOException { + public HRegion createDaughterRegionFromSplits(final HRegionInfo hri) throws IOException { // Move the files from the temporary .splits to the final /table/region directory fs.commitDaughterRegion(hri); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java index 12e93c23fff..50382a442b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java @@ -480,7 +480,7 @@ public class HRegionFileSystem { return new Path(getRegionDir(), REGION_SPLITS_DIR); } - Path getSplitsDir(final HRegionInfo hri) { + public Path getSplitsDir(final HRegionInfo hri) { return new Path(getSplitsDir(), hri.getEncodedName()); } @@ -539,7 +539,7 @@ public class HRegionFileSystem { * @param regionInfo daughter {@link org.apache.hadoop.hbase.HRegionInfo} * @throws IOException */ - Path commitDaughterRegion(final HRegionInfo regionInfo) + public Path commitDaughterRegion(final HRegionInfo regionInfo) throws IOException { Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName()); Path daughterTmpDir = this.getSplitsDir(regionInfo); @@ -563,7 +563,7 @@ public class HRegionFileSystem { /** * Create the region splits directory. */ - void createSplitsDir() throws IOException { + public void createSplitsDir() throws IOException { Path splitdir = getSplitsDir(); if (fs.exists(splitdir)) { LOG.info("The " + splitdir + " directory exists. 
Hence deleting it to recreate it"); @@ -590,12 +590,15 @@ public class HRegionFileSystem { * @return Path to created reference. * @throws IOException */ - Path splitStoreFile(final HRegionInfo hri, final String familyName, final StoreFile f, + public Path splitStoreFile(final HRegionInfo hri, final String familyName, final StoreFile f, final byte[] splitRow, final boolean top, RegionSplitPolicy splitPolicy) throws IOException { if (splitPolicy == null || !splitPolicy.skipStoreFileRangeCheck(familyName)) { // Check whether the split row lies in the range of the store file // If it is outside the range, return directly. + if (f.getReader() == null) { + f.createReader(); + } try { if (top) { //check if larger than last key. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 8bea130d2f2..495aace114c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -83,6 +83,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionUtils; +import org.apache.hadoop.hbase.client.NonceGenerator; import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; import org.apache.hadoop.hbase.conf.ConfigurationManager; import org.apache.hadoop.hbase.conf.ConfigurationObserver; @@ -142,6 +143,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringP import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; @@ -153,6 +156,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse; import org.apache.hadoop.hbase.trace.SpanReceiverHost; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; @@ -160,6 +165,7 @@ import org.apache.hadoop.hbase.util.CompressionTest; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSTableDescriptors; import 
org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.ForeignExceptionUtil; import org.apache.hadoop.hbase.util.HasThread; import org.apache.hadoop.hbase.util.JSONBean; import org.apache.hadoop.hbase.util.JvmPauseMonitor; @@ -2099,6 +2105,81 @@ public class HRegionServer extends HasThread implements return false; } + @Override + public long requestRegionSplit(final HRegionInfo regionInfo, final byte[] splitRow) { + NonceGenerator ng = clusterConnection.getNonceGenerator(); + final long nonceGroup = ng.getNonceGroup(); + final long nonce = ng.newNonce(); + long procId = -1; + SplitTableRegionRequest request = + RequestConverter.buildSplitTableRegionRequest(regionInfo, splitRow, nonceGroup, nonce); + + while (keepLooping()) { + RegionServerStatusService.BlockingInterface rss = rssStub; + try { + if (rss == null) { + createRegionServerStatusStub(); + continue; + } + SplitTableRegionResponse response = rss.splitRegion(null, request); + + //TODO: should we limit the retry number before quitting? + if (response == null || (procId = response.getProcId()) == -1) { + LOG.warn("Failed to split " + regionInfo + " retrying..."); + continue; + } + + break; + } catch (ServiceException se) { + // TODO: retry or just fail + IOException ioe = ProtobufUtil.getRemoteException(se); + LOG.info("Failed to split region, will retry", ioe); + if (rssStub == rss) { + rssStub = null; + } + } + } + return procId; + } + + @Override + public boolean isProcedureFinished(final long procId) throws IOException { + GetProcedureResultRequest request = + GetProcedureResultRequest.newBuilder().setProcId(procId).build(); + + while (keepLooping()) { + RegionServerStatusService.BlockingInterface rss = rssStub; + try { + if (rss == null) { + createRegionServerStatusStub(); + continue; + } + // TODO: find a way to get proc result + GetProcedureResultResponse response = rss.getProcedureResult(null, request); + + if (response == null) { + LOG.warn("Failed to get procedure (id=" + procId + ") status."); + return false; + } else if (response.getState() == GetProcedureResultResponse.State.RUNNING) { + return false; + } else if (response.hasException()) { + // Procedure failed. + throw ForeignExceptionUtil.toIOException(response.getException()); + } + // Procedure completes successfully + break; + } catch (ServiceException se) { + // TODO: retry or just fail + IOException ioe = ProtobufUtil.getRemoteException(se); + LOG.warn("Failed to get split region procedure result. Retrying", ioe); + if (rssStub == rss) { + rssStub = null; + } + } + } + return true; + } + /** * Trigger a flush in the primary region replica if this region is a secondary replica. Does not * block this thread. See RegionReplicaFlushHandler for details. @@ -2946,6 +3027,41 @@ public class HRegionServer extends HasThread implements return true; } + /** + * Close and offline the region for split + * + * @param parentRegionEncodedName the name of the region to close + * @return True if closed the region successfully. 
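requestRegionSplit and isProcedureFinished above give the region server a request-then-poll protocol against the master: submit the split, keep the returned procedure id, and poll until the procedure completes (the rewritten SplitRequest below uses exactly this pattern). A condensed, self-contained sketch of that flow, with a hypothetical interface standing in for the region-server services:

    import java.io.IOException;

    public class SplitPollingSketch {
      /** Hypothetical stand-in for the two RegionServerServices methods added in this patch. */
      interface SplitServices {
        long requestRegionSplit() throws IOException;             // returns the procedure id, or -1
        boolean isProcedureFinished(long procId) throws IOException;
      }

      static boolean splitAndWait(SplitServices services) throws IOException, InterruptedException {
        long procId = services.requestRegionSplit();
        if (procId == -1) {
          return false;                     // the master rejected the split or was unreachable
        }
        while (!services.isProcedureFinished(procId)) {
          Thread.sleep(1000);               // same one-second poll interval SplitRequest uses
        }
        return true;                        // a failed procedure surfaces as an IOException instead
      }

      public static void main(String[] args) throws Exception {
        // Toy services: the "procedure" finishes on the second poll.
        SplitServices services = new SplitServices() {
          private int polls = 0;
          @Override public long requestRegionSplit() { return 42L; }
          @Override public boolean isProcedureFinished(long procId) { return ++polls > 1; }
        };
        System.out.println(splitAndWait(services)); // true
      }
    }
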
+ * @throws IOException + */ + protected boolean closeAndOfflineRegionForSplit( + final String parentRegionEncodedName) throws IOException { + Region parentRegion = this.getFromOnlineRegions(parentRegionEncodedName); + if (parentRegion != null) { + Map> hstoreFilesToSplit = null; + Exception exceptionToThrow = null; + try{ + hstoreFilesToSplit = ((HRegion)parentRegion).close(false); + } catch (Exception e) { + exceptionToThrow = e; + } + if (exceptionToThrow == null && hstoreFilesToSplit == null) { + // The region was closed by someone else + exceptionToThrow = + new IOException("Failed to close region: already closed by another thread"); + } + + if (exceptionToThrow != null) { + if (exceptionToThrow instanceof IOException) throw (IOException)exceptionToThrow; + throw new IOException(exceptionToThrow); + } + + // Offline the region + this.removeFromOnlineRegions(parentRegion, null); + } + return true; + } + /** * @param regionName * @return HRegion for the passed binary regionName or null if diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 09c7fb93605..35b2ab09e4f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -93,6 +93,8 @@ import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.master.MasterRpcServices; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; @@ -1361,6 +1363,33 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } } + @Override + @QosPriority(priority=HConstants.ADMIN_QOS) + public CloseRegionForSplitResponse closeRegionForSplit( + final RpcController controller, + final CloseRegionForSplitRequest request) throws ServiceException { + try { + checkOpen(); + + final String encodedRegionName = ProtobufUtil.getRegionEncodedName(request.getRegion()); + + // Can be null if we're calling close on a region that's not online + final Region parentRegion = regionServer.getFromOnlineRegions(encodedRegionName); + if ((parentRegion != null) && (parentRegion.getCoprocessorHost() != null)) { + parentRegion.getCoprocessorHost().preClose(false); + } + + requestCount.increment(); + LOG.info("Close and offline " + encodedRegionName + " and prepare for split."); + boolean closed = regionServer.closeAndOfflineRegionForSplit(encodedRegionName); + CloseRegionForSplitResponse.Builder builder = + CloseRegionForSplitResponse.newBuilder().setClosed(closed); + return builder.build(); + } catch (IOException ie) { + throw new ServiceException(ie); + } + } + /** * Compact a region on the region server. 
* diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index 567664e4f6d..0e4d0fc3192 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -707,7 +707,7 @@ public class RegionCoprocessorHost * Invoked just before a split * @throws IOException */ - // TODO: Deprecate this + @Deprecated public void preSplit(final User user) throws IOException { execOperation(coprocessors.isEmpty() ? null : new RegionOperation(user) { @Override @@ -721,7 +721,10 @@ public class RegionCoprocessorHost /** * Invoked just before a split * @throws IOException + * + * Note: the logic moves to Master; it is unused in RS */ + @Deprecated public void preSplit(final byte[] splitRow, final User user) throws IOException { execOperation(coprocessors.isEmpty() ? null : new RegionOperation(user) { @Override @@ -737,7 +740,10 @@ public class RegionCoprocessorHost * @param l the new left-hand daughter region * @param r the new right-hand daughter region * @throws IOException + * + * Note: the logic moves to Master; it is unused in RS */ + @Deprecated public void postSplit(final Region l, final Region r, final User user) throws IOException { execOperation(coprocessors.isEmpty() ? null : new RegionOperation(user) { @Override @@ -748,6 +754,10 @@ public class RegionCoprocessorHost }); } + /** + * Note: the logic moves to Master; it is unused in RS + */ + @Deprecated public boolean preSplitBeforePONR(final byte[] splitKey, final List metaEntries, final User user) throws IOException { return execOperation(coprocessors.isEmpty() ? null : new RegionOperation(user) { @@ -759,6 +769,10 @@ public class RegionCoprocessorHost }); } + /** + * Note: the logic moves to Master; it is unused in RS + */ + @Deprecated public void preSplitAfterPONR(final User user) throws IOException { execOperation(coprocessors.isEmpty() ? null : new RegionOperation(user) { @Override @@ -772,7 +786,10 @@ public class RegionCoprocessorHost /** * Invoked just before the rollback of a failed split is started * @throws IOException - */ + * + * Note: the logic moves to Master; it is unused in RS + */ + @Deprecated public void preRollBackSplit(final User user) throws IOException { execOperation(coprocessors.isEmpty() ? null : new RegionOperation(user) { @Override @@ -786,7 +803,10 @@ public class RegionCoprocessorHost /** * Invoked just after the rollback of a failed split is done * @throws IOException - */ + * + * Note: the logic moves to Master; it is unused in RS + */ + @Deprecated public void postRollBackSplit(final User user) throws IOException { execOperation(coprocessors.isEmpty() ? null : new RegionOperation(user) { @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java index 36c8a32fbcb..2fbd340a8b4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java @@ -180,6 +180,16 @@ public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegi @Deprecated boolean reportRegionStateTransition(TransitionCode code, HRegionInfo... 
hris); + /** + * Notify master that a region wants to be splitted. + */ + long requestRegionSplit(final HRegionInfo regionInfo, final byte[] splitRow); + + /** + * Check with master whether a procedure is completed (either succeed or fail) + */ + boolean isProcedureFinished(final long procId) throws IOException; + /** * Returns a reference to the region server's RPC server */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java index 91a5f377b75..eb9811da2f3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java @@ -19,17 +19,15 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; +import java.security.PrivilegedAction; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.DroppedSnapshotException; -import org.apache.hadoop.hbase.master.TableLockManager.TableLock; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.util.StringUtils; import com.google.common.base.Preconditions; @@ -43,7 +41,6 @@ class SplitRequest implements Runnable { private final byte[] midKey; private final HRegionServer server; private final User user; - private TableLock tableLock; SplitRequest(Region region, byte[] midKey, HRegionServer hrs, User user) { Preconditions.checkNotNull(hrs); @@ -58,63 +55,48 @@ class SplitRequest implements Runnable { return "regionName=" + parent + ", midKey=" + Bytes.toStringBinary(midKey); } - private void doSplitting(User user) { + private void doSplitting() { boolean success = false; server.metricsRegionServer.incrSplitRequest(); long startTime = EnvironmentEdgeManager.currentTime(); - SplitTransactionImpl st = new SplitTransactionImpl(parent, midKey); + try { - //acquire a shared read lock on the table, so that table schema modifications - //do not happen concurrently - tableLock = server.getTableLockManager().readLock(parent.getTableDesc().getTableName() - , "SPLIT_REGION:" + parent.getRegionInfo().getRegionNameAsString()); - try { - tableLock.acquire(); - } catch (IOException ex) { - tableLock = null; - throw ex; + long procId; + if (user != null && user.getUGI() != null) { + procId = user.getUGI().doAs (new PrivilegedAction() { + @Override + public Long run() { + try { + return server.requestRegionSplit(parent.getRegionInfo(), midKey); + } catch (Exception e) { + LOG.error("Failed to complete region split ", e); + } + return (long)-1; + } + }); + } else { + procId = server.requestRegionSplit(parent.getRegionInfo(), midKey); } - // If prepare does not return true, for some reason -- logged inside in - // the prepare call -- we are not ready to split just now. Just return. - if (!st.prepare()) return; - try { - st.execute(this.server, this.server, user); - success = true; - } catch (Exception e) { - if (this.server.isStopping() || this.server.isStopped()) { - LOG.info( - "Skip rollback/cleanup of failed split of " - + parent.getRegionInfo().getRegionNameAsString() + " because server is" - + (this.server.isStopping() ? 
" stopping" : " stopped"), e); - return; - } - if (e instanceof DroppedSnapshotException) { - server.abort("Replay of WAL required. Forcing server shutdown", e); - return; - } + if (procId != -1) { + // wait for the split to complete or get interrupted. If the split completes successfully, + // the procedure will return true; if the split fails, the procedure would throw exception. + // try { - LOG.info("Running rollback/cleanup of failed split of " + - parent.getRegionInfo().getRegionNameAsString() + "; " + e.getMessage(), e); - if (st.rollback(this.server, this.server)) { - LOG.info("Successful rollback of failed split of " + - parent.getRegionInfo().getRegionNameAsString()); - } else { - this.server.abort("Abort; we got an error after point-of-no-return"); + while (!(success = server.isProcedureFinished(procId))) { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + LOG.warn("Split region " + parent + " is still in progress. Not waiting..."); + break; + } } - } catch (RuntimeException ee) { - String msg = "Failed rollback of failed split of " + - parent.getRegionInfo().getRegionNameAsString() + " -- aborting server"; - // If failed rollback, kill this server to avoid having a hole in table. - LOG.info(msg, ee); - this.server.abort(msg + " -- Cause: " + ee.getMessage()); + } catch (IOException e) { + LOG.error("Split region " + parent + " failed.", e); } - return; + } else { + LOG.error("Fail to split region " + parent); } - } catch (IOException ex) { - ex = ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex; - LOG.error("Split failed " + this, ex); - server.checkFileSystem(); } finally { if (this.parent.getCoprocessorHost() != null) { try { @@ -124,24 +106,17 @@ class SplitRequest implements Runnable { io instanceof RemoteException ? ((RemoteException) io).unwrapRemoteException() : io); } } + + // Update regionserver metrics with the split transaction total running time + server.metricsRegionServer.updateSplitTime(EnvironmentEdgeManager.currentTime() - startTime); + if (parent.shouldForceSplit()) { parent.clearSplit(); } - releaseTableLock(); - long endTime = EnvironmentEdgeManager.currentTime(); - // Update regionserver metrics with the split transaction total running time - server.metricsRegionServer.updateSplitTime(endTime - startTime); + if (success) { server.metricsRegionServer.incrSplitSuccess(); - // Log success - LOG.info("Region split, hbase:meta updated, and report to master. Parent=" - + parent.getRegionInfo().getRegionNameAsString() + ", new regions: " - + st.getFirstDaughter().getRegionNameAsString() + ", " - + st.getSecondDaughter().getRegionNameAsString() + ". Split took " - + StringUtils.formatTimeDiff(EnvironmentEdgeManager.currentTime(), startTime)); } - // Always log the split transaction journal - LOG.info("Split transaction journal:\n\t" + StringUtils.join("\n\t", st.getJournal())); } } @@ -152,19 +127,7 @@ class SplitRequest implements Runnable { this.server.isStopping() + " or stopped=" + this.server.isStopped()); return; } - doSplitting(user); - } - protected void releaseTableLock() { - if (this.tableLock != null) { - try { - this.tableLock.release(); - } catch (IOException ex) { - LOG.error("Could not release the table lock (something is really wrong). 
" - + "Aborting this server to avoid holding the lock forever."); - this.server.abort("Abort; we got an error when releasing the table lock " - + "on " + parent.getRegionInfo().getRegionNameAsString()); - } - } + doSplitting(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 3fc2ef5c8fa..831db4b8dc3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -1446,6 +1446,14 @@ public class AccessController extends BaseMasterAndRegionObserver Action.ADMIN, Action.CREATE); } + @Override + public void preSplitRegion( + final ObserverContext ctx, + final TableName tableName, + final byte[] splitRow) throws IOException { + requirePermission(getActiveUser(ctx), "split", tableName, null, null, Action.ADMIN); + } + /* ---- RegionObserver implementation ---- */ @Override @@ -1509,19 +1517,6 @@ public class AccessController extends BaseMasterAndRegionObserver Action.ADMIN, Action.CREATE); } - @Override - public void preSplit(ObserverContext c) throws IOException { - requirePermission(getActiveUser(c), "split", getTableName(c.getEnvironment()), null, null, - Action.ADMIN); - } - - @Override - public void preSplit(ObserverContext c, - byte[] splitRow) throws IOException { - requirePermission(getActiveUser(c), "split", getTableName(c.getEnvironment()), null, null, - Action.ADMIN); - } - @Override public InternalScanner preCompact(ObserverContext c, final Store store, final InternalScanner scanner, final ScanType scanType) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java index aa53d22305b..404c9aeace1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java @@ -306,6 +306,16 @@ public class MockRegionServerServices implements RegionServerServices { return false; } + @Override + public long requestRegionSplit(final HRegionInfo regionInfo, final byte[] splitRow) { + return -1; + } + + @Override + public boolean isProcedureFinished(final long procId) { + return false; + } + @Override public boolean registerService(Service service) { // TODO Auto-generated method stub diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java index b2ef1bdd6e8..465853abcf2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java @@ -163,9 +163,7 @@ public class TestCoprocessorInterface { private boolean postCompactCalled; private boolean preFlushCalled; private boolean postFlushCalled; - private boolean preSplitCalled; private boolean postSplitCalled; - private boolean preSplitWithSplitRowCalled; private ConcurrentMap sharedData; @Override @@ -218,16 +216,6 @@ public class TestCoprocessorInterface { postFlushCalled = true; } @Override - public void preSplit(ObserverContext e) { - preSplitCalled = true; - } - - @Override - public void preSplit(ObserverContext c, - byte[] splitRow) throws IOException { - 
preSplitWithSplitRowCalled = true; - } - @Override public void postSplit(ObserverContext e, Region l, Region r) { postSplitCalled = true; } @@ -257,7 +245,7 @@ public class TestCoprocessorInterface { return (preCompactCalled && postCompactCalled); } boolean wasSplit() { - return (preSplitCalled && postSplitCalled && preSplitWithSplitRowCalled); + return postSplitCalled; } Map getSharedData() { return sharedData; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java index 9f2d8f5c8cc..c431bebd4f4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java @@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.MasterSwitchType; +import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.master.HMaster; @@ -1466,6 +1467,44 @@ public class TestMasterObserver { public void postBalanceRSGroup(ObserverContext ctx, String groupName, boolean balancerRan) throws IOException { } + + @Override + public void preSplitRegion( + final ObserverContext c, + final TableName tableName, + final byte[] splitRow) throws IOException { + } + + @Override + public void preSplitRegionAction( + final ObserverContext c, + final TableName tableName, + final byte[] splitRow) throws IOException { + } + + @Override + public void postCompletedSplitRegionAction( + final ObserverContext c, + final HRegionInfo regionInfoA, + final HRegionInfo regionInfoB) throws IOException { + } + + @Override + public void preSplitRegionBeforePONRAction( + final ObserverContext ctx, + final byte[] splitKey, + final List metaEntries) throws IOException { + } + + @Override + public void preSplitRegionAfterPONRAction( + final ObserverContext ctx) throws IOException { + } + + @Override + public void preRollBackSplitRegionAction( + final ObserverContext ctx) throws IOException { + } } private static HBaseTestingUtility UTIL = new HBaseTestingUtility(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index 87fb16957d2..263006856d9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -286,6 +286,15 @@ public class MockNoopMasterServices implements MasterServices, Server { return -1; } + @Override + public long splitRegion( + final HRegionInfo regionInfo, + final byte[] splitRow, + final long nonceGroup, + final long nonce) throws IOException { + return -1; + } + @Override public TableLockManager getTableLockManager() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index 3e430b51aaa..0237f8df304 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -51,6 +51,8 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; @@ -382,9 +384,6 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { return builder.build(); } - - - @Override public MutateResponse mutate(RpcController controller, MutateRequest request) throws ServiceException { @@ -491,6 +490,13 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { return null; } + @Override + public CloseRegionForSplitResponse closeRegionForSplit( + RpcController controller, + CloseRegionForSplitRequest request) throws ServiceException { + return null; + } + @Override public FlushRegionResponse flushRegion(RpcController controller, FlushRegionRequest request) throws ServiceException { @@ -498,10 +504,19 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { return null; } + @Override + public long requestRegionSplit(HRegionInfo regionInfo, byte[] splitRow) { + return -1; + } + + @Override + public boolean isProcedureFinished(final long procId) { + return false; + } + @Override public SplitRegionResponse splitRegion(RpcController controller, SplitRegionRequest request) throws ServiceException { - // TODO Auto-generated method stub return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitTableRegionProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitTableRegionProcedure.java new file mode 100644 index 00000000000..147c3548b21 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitTableRegionProcedure.java @@ -0,0 +1,480 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.procedure; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.ProcedureInfo; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.CompactionState; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({MasterTests.class, MediumTests.class}) +public class TestSplitTableRegionProcedure { + private static final Log LOG = LogFactory.getLog(TestSplitTableRegionProcedure.class); + + protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + + private static long nonceGroup = HConstants.NO_NONCE; + private static long nonce = HConstants.NO_NONCE; + + private static String ColumnFamilyName1 = "cf1"; + private static String ColumnFamilyName2 = "cf2"; + + private static final int startRowNum = 11; + private static final int rowCount = 60; + + private static void setupConf(Configuration conf) { + conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); + conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0); + } + + @BeforeClass + public static void setupCluster() throws Exception { + setupConf(UTIL.getConfiguration()); + UTIL.startMiniCluster(3); + } + + @AfterClass + public static void cleanupTest() throws Exception { + try { + UTIL.shutdownMiniCluster(); + } catch (Exception e) { + LOG.warn("failure shutting down cluster", e); + } + } + + @Before + public void setup() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + nonceGroup = + MasterProcedureTestingUtility.generateNonceGroup(UTIL.getHBaseCluster().getMaster()); + nonce = MasterProcedureTestingUtility.generateNonce(UTIL.getHBaseCluster().getMaster()); + + // Turn off balancer so it doesn't cut in and mess up our placements. + UTIL.getHBaseAdmin().setBalancerRunning(false, true); + // Turn off the meta scanner so it don't remove parent on us. 
+ UTIL.getHBaseCluster().getMaster().setCatalogJanitorEnabled(false); + } + + @After + public void tearDown() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) { + LOG.info("Tear down, remove table=" + htd.getTableName()); + UTIL.deleteTable(htd.getTableName()); + } + } + + @Test(timeout=60000) + public void testSplitTableRegion() throws Exception { + final TableName tableName = TableName.valueOf("testSplitTableRegion"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + HRegionInfo [] regions = MasterProcedureTestingUtility.createTable( + procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2); + insertData(tableName); + int splitRowNum = startRowNum + rowCount / 2; + byte[] splitKey = Bytes.toBytes("" + splitRowNum); + + assertTrue("not able to find a splittable region", regions != null); + assertTrue("not able to find a splittable region", regions.length == 1); + + // Split region of the table + long procId = procExec.submitProcedure( + new SplitTableRegionProcedure( + procExec.getEnvironment(), tableName, regions[0], splitKey), + nonceGroup, + nonce); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId); + + verify(tableName, splitRowNum); + } + + @Test(timeout=60000) + public void testSplitTableRegionNoStoreFile() throws Exception { + final TableName tableName = TableName.valueOf("testSplitTableRegionNoStoreFile"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + HRegionInfo [] regions = MasterProcedureTestingUtility.createTable( + procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2); + int splitRowNum = startRowNum + rowCount / 2; + byte[] splitKey = Bytes.toBytes("" + splitRowNum); + + assertTrue("not able to find a splittable region", regions != null); + assertTrue("not able to find a splittable region", regions.length == 1); + + // Split region of the table + long procId = procExec.submitProcedure( + new SplitTableRegionProcedure( + procExec.getEnvironment(), tableName, regions[0], splitKey), + nonceGroup, + nonce); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId); + + assertTrue(UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 2); + assertTrue(UTIL.countRows(tableName) == 0); + } + + @Test(timeout=60000) + public void testSplitTableRegionUnevenDaughter() throws Exception { + final TableName tableName = TableName.valueOf("testSplitTableRegionUnevenDaughter"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + HRegionInfo [] regions = MasterProcedureTestingUtility.createTable( + procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2); + insertData(tableName); + // Split to two daughters with one of them only has 1 row + int splitRowNum = startRowNum + rowCount / 4; + byte[] splitKey = Bytes.toBytes("" + splitRowNum); + + assertTrue("not able to find a splittable region", regions != null); + assertTrue("not able to find a splittable region", regions.length == 1); + + // Split region of the table + long procId = procExec.submitProcedure( + new SplitTableRegionProcedure( + procExec.getEnvironment(), tableName, regions[0], splitKey), + nonceGroup, + nonce); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId); + 
ProcedureTestingUtility.assertProcNotFailed(procExec, procId); + + verify(tableName, splitRowNum); + } + + @Test(timeout=60000) + public void testSplitTableRegionEmptyDaughter() throws Exception { + final TableName tableName = TableName.valueOf("testSplitTableRegionEmptyDaughter"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + HRegionInfo [] regions = MasterProcedureTestingUtility.createTable( + procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2); + insertData(tableName); + // Split to two daughters with one of them only has 1 row + int splitRowNum = startRowNum + rowCount; + byte[] splitKey = Bytes.toBytes("" + splitRowNum); + + assertTrue("not able to find a splittable region", regions != null); + assertTrue("not able to find a splittable region", regions.length == 1); + + // Split region of the table + long procId = procExec.submitProcedure( + new SplitTableRegionProcedure( + procExec.getEnvironment(), tableName, regions[0], splitKey), + nonceGroup, + nonce); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId); + + // Make sure one daughter has 0 rows. + List daughters = UTIL.getMiniHBaseCluster().getRegions(tableName); + assertTrue(daughters.size() == 2); + assertTrue(UTIL.countRows(tableName) == rowCount); + assertTrue(UTIL.countRows(daughters.get(0)) == 0 || UTIL.countRows(daughters.get(1)) == 0); + } + + @Test(timeout=60000) + public void testSplitTableRegionDeletedRowsDaughter() throws Exception { + final TableName tableName = TableName.valueOf("testSplitTableRegionDeletedRowsDaughter"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + HRegionInfo [] regions = MasterProcedureTestingUtility.createTable( + procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2); + insertData(tableName); + // Split to two daughters with one of them only has 1 row + int splitRowNum = rowCount; + deleteData(tableName, splitRowNum); + byte[] splitKey = Bytes.toBytes("" + splitRowNum); + + assertTrue("not able to find a splittable region", regions != null); + assertTrue("not able to find a splittable region", regions.length == 1); + + // Split region of the table + long procId = procExec.submitProcedure( + new SplitTableRegionProcedure( + procExec.getEnvironment(), tableName, regions[0], splitKey), + nonceGroup, + nonce); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId); + + UTIL.getHBaseAdmin().majorCompact(tableName); + // waiting for the major compaction to complete + UTIL.waitFor(6000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws IOException { + return UTIL.getHBaseAdmin().getCompactionState(tableName) == CompactionState.NONE; + } + }); + + // Make sure one daughter has 0 rows. 
+ List daughters = UTIL.getMiniHBaseCluster().getRegions(tableName); + assertTrue(daughters.size() == 2); + final int currentRowCount = splitRowNum - startRowNum; + assertTrue(UTIL.countRows(tableName) == currentRowCount); + assertTrue(UTIL.countRows(daughters.get(0)) == 0 || UTIL.countRows(daughters.get(1)) == 0); + } + + @Test(timeout=60000) + public void testSplitTableRegionTwiceWithSameNonce() throws Exception { + final TableName tableName = TableName.valueOf("testSplitTableRegionTwiceWithSameNonce"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + HRegionInfo [] regions = MasterProcedureTestingUtility.createTable( + procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2); + insertData(tableName); + int splitRowNum = startRowNum + rowCount / 2; + byte[] splitKey = Bytes.toBytes("" + splitRowNum); + + assertTrue("not able to find a splittable region", regions != null); + assertTrue("not able to find a splittable region", regions.length == 1); + + // Split region of the table + long procId1 = procExec.submitProcedure( + new SplitTableRegionProcedure( + procExec.getEnvironment(), tableName, regions[0], splitKey), + nonceGroup, + nonce); + // Split region of the table with the same nonce + long procId2 = procExec.submitProcedure( + new SplitTableRegionProcedure( + procExec.getEnvironment(), tableName, regions[0], splitKey), + nonceGroup, + nonce); + + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId1); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); + // The second proc should succeed too - because it is the same proc. + ProcedureTestingUtility.waitProcedure(procExec, procId2); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId2); + assertTrue(procId1 == procId2); + + verify(tableName, splitRowNum); + } + + @Test(timeout=60000) + public void testInvalidSplitKey() throws Exception { + final TableName tableName = TableName.valueOf("testInvalidSplitKey"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + HRegionInfo [] regions = MasterProcedureTestingUtility.createTable( + procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2); + insertData(tableName); + int splitRowNum = startRowNum + rowCount / 2; + byte[] splitKey = Bytes.toBytes("" + splitRowNum); + + assertTrue("not able to find a splittable region", regions != null); + assertTrue("not able to find a splittable region", regions.length == 1); + + // Split region of the table with null split key + long procId1 = procExec.submitProcedure( + new SplitTableRegionProcedure(procExec.getEnvironment(), tableName, regions[0], null), + nonceGroup, + nonce); + ProcedureTestingUtility.waitProcedure(procExec, procId1); + ProcedureInfo result = procExec.getResult(procId1); + assertTrue(result.isFailed()); + LOG.debug("Split failed with exception: " + result.getExceptionFullMessage()); + assertTrue(UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 1); + } + + @Test(timeout = 600000) + public void testRollbackAndDoubleExecution() throws Exception { + final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + HRegionInfo [] regions = MasterProcedureTestingUtility.createTable( + procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2); + insertData(tableName); + int splitRowNum = startRowNum + rowCount / 2; + byte[] splitKey = Bytes.toBytes("" + splitRowNum); + + assertTrue("not able to find a splittable region", regions != 
null); + assertTrue("not able to find a splittable region", regions.length == 1); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Split region of the table + long procId = procExec.submitProcedure( + new SplitTableRegionProcedure(procExec.getEnvironment(), tableName, regions[0], splitKey), + nonceGroup, + nonce); + + // Failing before SPLIT_TABLE_REGION_UPDATE_META we should trigger the + // rollback + // NOTE: the 5 (number before SPLIT_TABLE_REGION_UPDATE_META step) is + // hardcoded, so you have to look at this test at least once when you add a new step. + int numberOfSteps = 5; + MasterProcedureTestingUtility.testRollbackAndDoubleExecution( + procExec, + procId, + numberOfSteps); + } + + @Test(timeout=60000) + public void testRecoveryAndDoubleExecution() throws Exception { + final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + HRegionInfo [] regions = MasterProcedureTestingUtility.createTable( + procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2); + insertData(tableName); + int splitRowNum = startRowNum + rowCount / 2; + byte[] splitKey = Bytes.toBytes("" + splitRowNum); + + assertTrue("not able to find a splittable region", regions != null); + assertTrue("not able to find a splittable region", regions.length == 1); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Split region of the table + long procId = procExec.submitProcedure( + new SplitTableRegionProcedure(procExec.getEnvironment(), tableName, regions[0], splitKey), + nonceGroup, + nonce); + + // Restart the executor and execute the step twice + int numberOfSteps = SplitTableRegionState.values().length; + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId); + + verify(tableName, splitRowNum); + } + + private void insertData(final TableName tableName) throws IOException, InterruptedException { + Table t = UTIL.getConnection().getTable(tableName); + Put p; + for (int i= 0; i < rowCount / 2; i++) { + p = new Put(Bytes.toBytes("" + (startRowNum + i))); + p.addColumn(Bytes.toBytes(ColumnFamilyName1), Bytes.toBytes("q1"), Bytes.toBytes(i)); + p.addColumn(Bytes.toBytes(ColumnFamilyName2), Bytes.toBytes("q2"), Bytes.toBytes(i)); + t.put(p); + p = new Put(Bytes.toBytes("" + (startRowNum + rowCount - i - 1))); + p.addColumn(Bytes.toBytes(ColumnFamilyName1), Bytes.toBytes("q1"), Bytes.toBytes(i)); + p.addColumn(Bytes.toBytes(ColumnFamilyName2), Bytes.toBytes("q2"), Bytes.toBytes(i)); + t.put(p); + if (i % 5 == 0) { + UTIL.getHBaseAdmin().flush(tableName); + } + } + } + + private void deleteData( + final TableName tableName, + final int startDeleteRowNum) throws IOException, InterruptedException { + Table t = UTIL.getConnection().getTable(tableName); + final int numRows = rowCount + startRowNum - startDeleteRowNum; + Delete d; + for (int i= startDeleteRowNum; i <= numRows + startDeleteRowNum; i++) { + d = new Delete(Bytes.toBytes("" + i)); + t.delete(d); + if (i % 5 == 0) { + UTIL.getHBaseAdmin().flush(tableName); + } + } + } + + private void verify(final TableName tableName, final int splitRowNum) throws IOException { + List daughters = UTIL.getMiniHBaseCluster().getRegions(tableName); + assertTrue(daughters.size() == 2); + LOG.info("Row 
Count = " + UTIL.countRows(tableName)); + assertTrue(UTIL.countRows(tableName) == rowCount); + int startRow; + int numRows; + for (int i = 0; i < daughters.size(); i++) { + if (Bytes.compareTo( + daughters.get(i).getRegionInfo().getStartKey(), HConstants.EMPTY_BYTE_ARRAY) == 0) { + startRow = startRowNum; // first region + numRows = splitRowNum - startRowNum; + } else { + startRow = splitRowNum; + numRows = rowCount + startRowNum - splitRowNum; + } + verifyData( + daughters.get(i), + startRow, + numRows, + ColumnFamilyName1.getBytes(), + ColumnFamilyName2.getBytes()); + } + } + + private void verifyData( + final HRegion newReg, + final int startRow, + final int numRows, + final byte[]... families) + throws IOException { + for (int i = startRow; i < startRow + numRows; i++) { + byte[] row = Bytes.toBytes("" + i); + Get get = new Get(row); + Result result = newReg.get(get); + Cell[] raw = result.rawCells(); + assertEquals(families.length, result.size()); + for (int j = 0; j < families.length; j++) { + assertTrue(CellUtil.matchingRow(raw[j], row)); + assertTrue(CellUtil.matchingFamily(raw[j], families[j])); + } + } + } + + private ProcedureExecutor getMasterProcedureExecutor() { + return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java index 8c9db88a50a..a25c1577dfc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java @@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver; @@ -495,7 +494,6 @@ public class TestNamespaceAuditor { // Make sure no regions have been added. 
List hris = ADMIN.getTableRegions(tableOne); assertEquals(2, hris.size()); - assertTrue("split completed", observer.preSplitBeforePONR.getCount() == 1); htable.close(); } @@ -570,7 +568,6 @@ public class TestNamespaceAuditor { public static class CustomObserver extends BaseRegionObserver{ volatile CountDownLatch postSplit; - volatile CountDownLatch preSplitBeforePONR; volatile CountDownLatch postCompact; @Override @@ -585,17 +582,9 @@ public class TestNamespaceAuditor { postCompact.countDown(); } - @Override - public void preSplitBeforePONR(ObserverContext ctx, - byte[] splitKey, List metaEntries) throws IOException { - preSplitBeforePONR.countDown(); - } - - @Override public void start(CoprocessorEnvironment e) throws IOException { postSplit = new CountDownLatch(1); - preSplitBeforePONR = new CountDownLatch(1); postCompact = new CountDownLatch(1); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java index 62d1b4957fa..e2a57ebaa8b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java @@ -89,92 +89,6 @@ public class TestEndToEndSplitTransaction { TEST_UTIL.shutdownMiniCluster(); } - @Test - public void testMasterOpsWhileSplitting() throws Exception { - TableName tableName = TableName.valueOf("TestSplit"); - byte[] familyName = Bytes.toBytes("fam"); - try (Table ht = TEST_UTIL.createTable(tableName, familyName)) { - TEST_UTIL.loadTable(ht, familyName, false); - } - HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(0); - byte[] firstRow = Bytes.toBytes("aaa"); - byte[] splitRow = Bytes.toBytes("lll"); - byte[] lastRow = Bytes.toBytes("zzz"); - try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { - // this will also cache the region - byte[] regionName = conn.getRegionLocator(tableName).getRegionLocation(splitRow) - .getRegionInfo().getRegionName(); - Region region = server.getRegion(regionName); - SplitTransactionImpl split = new SplitTransactionImpl((HRegion) region, splitRow); - split.prepare(); - - // 1. phase I - PairOfSameType regions = split.createDaughters(server, server, null); - assertFalse(test(conn, tableName, firstRow, server)); - assertFalse(test(conn, tableName, lastRow, server)); - - // passing null as services prevents final step - // 2, most of phase II - split.openDaughters(server, null, regions.getFirst(), regions.getSecond()); - assertFalse(test(conn, tableName, firstRow, server)); - assertFalse(test(conn, tableName, lastRow, server)); - - // 3. finish phase II - // note that this replicates some code from SplitTransaction - // 2nd daughter first - server.reportRegionStateTransition( - RegionServerStatusProtos.RegionStateTransition.TransitionCode.SPLIT, - region.getRegionInfo(), regions.getFirst().getRegionInfo(), regions.getSecond() - .getRegionInfo()); - - // Add to online regions - server.addToOnlineRegions(regions.getSecond()); - // THIS is the crucial point: - // the 2nd daughter was added, so querying before the split key should fail. - assertFalse(test(conn, tableName, firstRow, server)); - // past splitkey is ok. 
- assertTrue(test(conn, tableName, lastRow, server)); - - // Add to online regions - server.addToOnlineRegions(regions.getFirst()); - assertTrue(test(conn, tableName, firstRow, server)); - assertTrue(test(conn, tableName, lastRow, server)); - - assertTrue(test(conn, tableName, firstRow, server)); - assertTrue(test(conn, tableName, lastRow, server)); - } - } - - /** - * attempt to locate the region and perform a get and scan - * @return True if successful, False otherwise. - */ - private boolean test(Connection conn, TableName tableName, byte[] row, - HRegionServer server) { - // not using HTable to avoid timeouts and retries - try { - byte[] regionName = conn.getRegionLocator(tableName).getRegionLocation(row, true) - .getRegionInfo().getRegionName(); - // get and scan should now succeed without exception - ClientProtos.GetRequest request = - RequestConverter.buildGetRequest(regionName, new Get(row)); - server.getRSRpcServices().get(null, request); - ScanRequest scanRequest = RequestConverter.buildScanRequest( - regionName, new Scan(row), 1, true); - try { - server.getRSRpcServices().scan( - new HBaseRpcControllerImpl(), scanRequest); - } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException e) { - throw ProtobufUtil.handleRemoteException(e); - } - } catch (IOException e) { - return false; - } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException e1) { - return false; - } - return true; - } - /** * Tests that the client sees meta table changes as atomic during splits */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index 085572d3e3b..dd79e397c83 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java @@ -31,7 +31,6 @@ import java.io.InterruptedIOException; import java.util.Collection; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; @@ -52,14 +51,11 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MiniHBaseCluster; -import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Consistency; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; @@ -70,10 +66,9 @@ import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TestReplicasClient.SlowMeCopro; -import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager; -import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver; +import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import 
org.apache.hadoop.hbase.coprocessor.ObserverContext; -import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterRpcServices; @@ -95,7 +90,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HBaseFsck; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; -import org.apache.hadoop.hbase.util.PairOfSameType; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; @@ -105,13 +99,12 @@ import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; +import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; /** - * Like TestSplitTransaction in that we're testing {@link SplitTransactionImpl} - * only the below tests are against a running cluster where TestSplitTransaction - * is tests against a bare {@link HRegion}. + * The below tests are testing split region against a running cluster */ @Category({RegionServerTests.class, LargeTests.class}) @SuppressWarnings("deprecation") @@ -121,8 +114,6 @@ public class TestSplitTransactionOnCluster { private Admin admin = null; private MiniHBaseCluster cluster = null; private static final int NB_SERVERS = 3; - private static CountDownLatch latch = new CountDownLatch(1); - private static volatile boolean secondSplit = false; static final HBaseTestingUtility TESTING_UTIL = new HBaseTestingUtility(); @@ -145,21 +136,48 @@ public class TestSplitTransactionOnCluster { @After public void tearDown() throws Exception { this.admin.close(); + for (HTableDescriptor htd: this.admin.listTables()) { + LOG.info("Tear down, remove table=" + htd.getTableName()); + TESTING_UTIL.deleteTable(htd.getTableName()); + } } private HRegionInfo getAndCheckSingleTableRegion(final List regions) throws IOException, InterruptedException { assertEquals(1, regions.size()); HRegionInfo hri = regions.get(0).getRegionInfo(); - TESTING_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() + cluster.getMaster().getAssignmentManager() .waitOnRegionToClearRegionsInTransition(hri, 600000); return hri; } + private void requestSplitRegion( + final HRegionServer rsServer, + final Region region, + final byte[] midKey) throws IOException { + long procId = cluster.getMaster().splitRegion(region.getRegionInfo(), midKey, 0, 0); + // wait + if (procId != -1) { + // wait for the split to complete or get interrupted. If the split completes successfully, + // the procedure will return true; if the split fails, the procedure would throw exception. + // + while (!rsServer.isProcedureFinished(procId)) { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + throw new IOException("Split region interrupted."); + } + } + } else { + throw new IOException ("Request split region failed."); + } + } + @Test(timeout = 60000) public void testRITStateForRollback() throws Exception { final TableName tableName = TableName.valueOf("testRITStateForRollback"); + final HMaster master = cluster.getMaster(); try { // Create table then get the single region for our new table. 
Table t = createTableAndWait(tableName, Bytes.toBytes("cf")); @@ -171,22 +189,25 @@ public class TestSplitTransactionOnCluster { // Turn off balancer so it doesn't cut in and mess up our placements. this.admin.setBalancerRunning(false, true); // Turn off the meta scanner so it don't remove parent on us. - cluster.getMaster().setCatalogJanitorEnabled(false); + master.setCatalogJanitorEnabled(false); // find a splittable region final HRegion region = findSplittableRegion(regions); assertTrue("not able to find a splittable region", region != null); - // install region co-processor to fail splits - region.getCoprocessorHost().load(FailingSplitRegionObserver.class, - Coprocessor.PRIORITY_USER, region.getBaseConf()); + // install master co-processor to fail splits + master.getMasterCoprocessorHost().load( + FailingSplitMasterObserver.class, + Coprocessor.PRIORITY_USER, + master.getConfiguration()); // split async this.admin.splitRegion(region.getRegionInfo().getRegionName(), new byte[] {42}); // we have to wait until the SPLITTING state is seen by the master - FailingSplitRegionObserver observer = (FailingSplitRegionObserver) region - .getCoprocessorHost().findCoprocessor(FailingSplitRegionObserver.class.getName()); + FailingSplitMasterObserver observer = + (FailingSplitMasterObserver) master.getMasterCoprocessorHost().findCoprocessor( + FailingSplitMasterObserver.class.getName()); assertNotNull(observer); observer.latch.await(); @@ -194,10 +215,12 @@ public class TestSplitTransactionOnCluster { cluster.getMaster().getAssignmentManager().waitOnRegionToClearRegionsInTransition(hri, 60000); } finally { admin.setBalancerRunning(true, false); - cluster.getMaster().setCatalogJanitorEnabled(true); + master.setCatalogJanitorEnabled(true); + abortAndWaitForMaster(); TESTING_UTIL.deleteTable(tableName); } } + @Test(timeout = 60000) public void testSplitFailedCompactionAndSplit() throws Exception { final TableName tableName = TableName.valueOf("testSplitFailedCompactionAndSplit"); @@ -238,80 +261,28 @@ public class TestSplitTransactionOnCluster { assertTrue(fileNum > store.getStorefiles().size()); // 3, Split - SplitTransactionImpl st = new SplitTransactionImpl(region, Bytes.toBytes("row3")); - assertTrue(st.prepare()); - st.execute(regionServer, regionServer); + requestSplitRegion(regionServer, region, Bytes.toBytes("row3")); assertEquals(2, cluster.getRegions(tableName).size()); } - public static class FailingSplitRegionObserver extends BaseRegionObserver { + public static class FailingSplitMasterObserver extends BaseMasterObserver { volatile CountDownLatch latch; @Override public void start(CoprocessorEnvironment e) throws IOException { latch = new CountDownLatch(1); } @Override - public void preSplitBeforePONR(ObserverContext ctx, - byte[] splitKey, List metaEntries) throws IOException { + public void preSplitRegionBeforePONRAction( + final ObserverContext ctx, + final byte[] splitKey, + final List metaEntries) throws IOException { latch.countDown(); throw new IOException("Causing rollback of region split"); } } - /** - * A test that intentionally has master fail the processing of the split message. - * Tests that after we process server shutdown, the daughters are up on line. 
- * @throws IOException - * @throws InterruptedException - * @throws ServiceException - */ - @Test (timeout = 300000) public void testRSSplitDaughtersAreOnlinedAfterShutdownHandling() - throws IOException, InterruptedException, ServiceException { - final TableName tableName = - TableName.valueOf("testRSSplitDaughtersAreOnlinedAfterShutdownHandling"); - - // Create table then get the single region for our new table. - Table t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY); - List regions = cluster.getRegions(tableName); - HRegionInfo hri = getAndCheckSingleTableRegion(regions); - - int tableRegionIndex = ensureTableRegionNotOnSameServerAsMeta(admin, hri); - - // Turn off balancer so it doesn't cut in and mess up our placements. - this.admin.setBalancerRunning(false, true); - // Turn off the meta scanner so it don't remove parent on us. - cluster.getMaster().setCatalogJanitorEnabled(false); - try { - // Add a bit of load up into the table so splittable. - TESTING_UTIL.loadTable(t, HConstants.CATALOG_FAMILY, false); - // Get region pre-split. - HRegionServer server = cluster.getRegionServer(tableRegionIndex); - printOutRegions(server, "Initial regions: "); - int regionCount = ProtobufUtil.getOnlineRegions(server.getRSRpcServices()).size(); - // Now, before we split, set special flag in master, a flag that has - // it FAIL the processing of split. - AssignmentManager.TEST_SKIP_SPLIT_HANDLING = true; - try { - // Now try splitting and it should work. - split(hri, server, regionCount); - } catch (RegionServerStoppedException rsse) { - // Expected. The regionserver should crash - } - - waitUntilRegionServerDead(); - awaitDaughters(tableName, 2); - } finally { - // Set this flag back. - AssignmentManager.TEST_SKIP_SPLIT_HANDLING = false; - admin.setBalancerRunning(true, false); - cluster.getMaster().setCatalogJanitorEnabled(true); - cluster.startRegionServer(); - t.close(); - } - } - - @Test (timeout = 300000) public void testExistingZnodeBlocksSplitAndWeRollback() - throws IOException, InterruptedException, NodeExistsException, KeeperException, ServiceException { + @Test (timeout = 300000) + public void testExistingZnodeBlocksSplitAndWeRollback() throws IOException, InterruptedException { final TableName tableName = TableName.valueOf("testExistingZnodeBlocksSplitAndWeRollback"); @@ -368,8 +339,9 @@ public class TestSplitTransactionOnCluster { * @throws IOException * @throws InterruptedException */ - @Test (timeout=300000) public void testShutdownFixupWhenDaughterHasSplit() - throws IOException, InterruptedException { + @Ignore // TODO: revisit this test when the new AM and SSH is implement + @Test (timeout=300000) + public void testShutdownFixupWhenDaughterHasSplit()throws IOException, InterruptedException { final TableName tableName = TableName.valueOf("testShutdownFixupWhenDaughterHasSplit"); @@ -469,8 +441,8 @@ public class TestSplitTransactionOnCluster { admin.flush(userTableName); } admin.majorCompact(userTableName); - List regionsOfTable = TESTING_UTIL.getMiniHBaseCluster() - .getMaster().getAssignmentManager().getRegionStates() + List regionsOfTable = + cluster.getMaster().getAssignmentManager().getRegionStates() .getRegionsOfTable(userTableName); HRegionInfo hRegionInfo = regionsOfTable.get(0); Put p = new Put("row6".getBytes()); @@ -484,17 +456,18 @@ public class TestSplitTransactionOnCluster { table.put(p); admin.flush(userTableName); admin.splitRegion(hRegionInfo.getRegionName(), "row7".getBytes()); - regionsOfTable = TESTING_UTIL.getMiniHBaseCluster().getMaster() + 
regionsOfTable = cluster.getMaster() .getAssignmentManager().getRegionStates() .getRegionsOfTable(userTableName); while (regionsOfTable.size() != 2) { Thread.sleep(2000); - regionsOfTable = TESTING_UTIL.getMiniHBaseCluster().getMaster() + regionsOfTable = cluster.getMaster() .getAssignmentManager().getRegionStates() .getRegionsOfTable(userTableName); } Assert.assertEquals(2, regionsOfTable.size()); + Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); int mainTableCount = 0; @@ -583,71 +556,6 @@ public class TestSplitTransactionOnCluster { } } - /** - * - * While transitioning node from RS_ZK_REGION_SPLITTING to - * RS_ZK_REGION_SPLITTING during region split,if zookeper went down split always - * fails for the region. HBASE-6088 fixes this scenario. - * This test case is to test the znode is deleted(if created) or not in roll back. - * - * @throws IOException - * @throws InterruptedException - * @throws KeeperException - */ - @Test(timeout = 60000) - public void testSplitBeforeSettingSplittingInZK() throws Exception, - InterruptedException, KeeperException { - testSplitBeforeSettingSplittingInZKInternals(); - } - - @Test(timeout = 60000) - public void testTableExistsIfTheSpecifiedTableRegionIsSplitParent() throws Exception { - final TableName tableName = - TableName.valueOf("testTableExistsIfTheSpecifiedTableRegionIsSplitParent"); - // Create table then get the single region for our new table. - Table t = createTableAndWait(tableName, Bytes.toBytes("cf")); - List regions = null; - try { - regions = cluster.getRegions(tableName); - int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionInfo() - .getRegionName()); - HRegionServer regionServer = cluster.getRegionServer(regionServerIndex); - insertData(tableName, admin, t); - // Turn off balancer so it doesn't cut in and mess up our placements. - admin.setBalancerRunning(false, true); - // Turn off the meta scanner so it don't remove parent on us. 
- cluster.getMaster().setCatalogJanitorEnabled(false); - boolean tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(), - tableName); - assertEquals("The specified table should present.", true, tableExists); - final HRegion region = findSplittableRegion(regions); - assertTrue("not able to find a splittable region", region != null); - SplitTransactionImpl st = new SplitTransactionImpl(region, Bytes.toBytes("row2")); - try { - st.prepare(); - st.createDaughters(regionServer, regionServer, null); - } catch (IOException e) { - - } - tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(), - tableName); - assertEquals("The specified table should present.", true, tableExists); - Set rit = cluster.getMaster().getAssignmentManager().getRegionStates() - .getRegionsInTransition(); - assertTrue(rit.size() == 3); - cluster.getMaster().getAssignmentManager().regionOffline(st.getFirstDaughter()); - cluster.getMaster().getAssignmentManager().regionOffline(st.getSecondDaughter()); - cluster.getMaster().getAssignmentManager().regionOffline(region.getRegionInfo()); - rit = cluster.getMaster().getAssignmentManager().getRegionStates().getRegionsInTransition(); - assertTrue(rit.size() == 0); - } finally { - admin.setBalancerRunning(true, false); - cluster.getMaster().setCatalogJanitorEnabled(true); - t.close(); - TESTING_UTIL.deleteTable(tableName); - } - } - @Test public void testSplitWithRegionReplicas() throws Exception { final TableName tableName = @@ -679,10 +587,8 @@ public class TestSplitTransactionOnCluster { regionServerIndex = cluster.getServerWith(region.getRegionInfo().getRegionName()); regionServer = cluster.getRegionServer(regionServerIndex); assertTrue("not able to find a splittable region", region != null); - SplitTransactionImpl st = new SplitTransactionImpl(region, Bytes.toBytes("row2")); try { - st.prepare(); - st.execute(regionServer, regionServer); + requestSplitRegion(regionServer, region, Bytes.toBytes("row2")); } catch (IOException e) { e.printStackTrace(); fail("Split execution should have succeeded with no exceptions thrown " + e); @@ -779,10 +685,8 @@ public class TestSplitTransactionOnCluster { assertTrue("not able to find a splittable region", region != null); // Now split. - SplitTransactionImpl st = new MockedSplitTransaction(region, Bytes.toBytes("row2")); try { - st.prepare(); - st.execute(regionServer, regionServer); + requestSplitRegion(regionServer, region, Bytes.toBytes("row2")); } catch (IOException e) { fail("Split execution should have succeeded with no exceptions thrown"); } @@ -826,195 +730,6 @@ public class TestSplitTransactionOnCluster { } } - /** - * Not really restarting the master. Simulate it by clear of new region - * state since it is not persisted, will be lost after master restarts. - */ - @Test(timeout = 180000) - public void testSplitAndRestartingMaster() throws Exception { - LOG.info("Starting testSplitAndRestartingMaster"); - final TableName tableName = TableName.valueOf("testSplitAndRestartingMaster"); - // Create table then get the single region for our new table. 
- createTableAndWait(tableName, HConstants.CATALOG_FAMILY); - List regions = cluster.getRegions(tableName); - HRegionInfo hri = getAndCheckSingleTableRegion(regions); - ensureTableRegionNotOnSameServerAsMeta(admin, hri); - int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionInfo() - .getRegionName()); - HRegionServer regionServer = cluster.getRegionServer(regionServerIndex); - // Turn off balancer so it doesn't cut in and mess up our placements. - this.admin.setBalancerRunning(false, true); - // Turn off the meta scanner so it don't remove parent on us. - cluster.getMaster().setCatalogJanitorEnabled(false); - try { - MyMasterRpcServices.enabled.set(true); - // find a splittable region. Refresh the regions list - regions = cluster.getRegions(tableName); - final HRegion region = findSplittableRegion(regions); - assertTrue("not able to find a splittable region", region != null); - - // Now split. - SplitTransactionImpl st = new SplitTransactionImpl(region, Bytes.toBytes("row2")); - try { - st.prepare(); - st.execute(regionServer, regionServer); - } catch (IOException e) { - fail("Split execution should have succeeded with no exceptions thrown"); - } - - // Postcondition - List daughters = cluster.getRegions(tableName); - LOG.info("xxx " + regions.size() + AssignmentManager.TEST_SKIP_SPLIT_HANDLING); - assertTrue(daughters.size() == 2); - } finally { - MyMasterRpcServices.enabled.set(false); - admin.setBalancerRunning(true, false); - cluster.getMaster().setCatalogJanitorEnabled(true); - } - } - - @Test(timeout = 180000) - public void testSplitHooksBeforeAndAfterPONR() throws Exception { - TableName firstTable = TableName.valueOf("testSplitHooksBeforeAndAfterPONR_1"); - TableName secondTable = TableName.valueOf("testSplitHooksBeforeAndAfterPONR_2"); - HColumnDescriptor hcd = new HColumnDescriptor("cf"); - - HTableDescriptor desc = new HTableDescriptor(firstTable); - desc.addCoprocessor(MockedRegionObserver.class.getName()); - desc.addFamily(hcd); - admin.createTable(desc); - TESTING_UTIL.waitUntilAllRegionsAssigned(firstTable); - - desc = new HTableDescriptor(secondTable); - desc.addFamily(hcd); - admin.createTable(desc); - TESTING_UTIL.waitUntilAllRegionsAssigned(secondTable); - - List firstTableRegions = cluster.getRegions(firstTable); - List secondTableRegions = cluster.getRegions(secondTable); - - // Check that both tables actually have regions. 
- if (firstTableRegions.size() == 0 || secondTableRegions.size() == 0) { - fail("Each table should have at least one region."); - } - ServerName serverName = cluster.getServerHoldingRegion(firstTable, - firstTableRegions.get(0).getRegionInfo().getRegionName()); - admin.move(secondTableRegions.get(0).getRegionInfo().getEncodedNameAsBytes(), - Bytes.toBytes(serverName.getServerName())); - Table table1 = null; - Table table2 = null; - try { - table1 = TESTING_UTIL.getConnection().getTable(firstTable); - table2 = TESTING_UTIL.getConnection().getTable(firstTable); - insertData(firstTable, admin, table1); - insertData(secondTable, admin, table2); - admin.split(firstTable, "row2".getBytes()); - firstTableRegions = cluster.getRegions(firstTable); - while (firstTableRegions.size() != 2) { - Thread.sleep(1000); - firstTableRegions = cluster.getRegions(firstTable); - } - assertEquals("Number of regions after split should be 2.", 2, firstTableRegions.size()); - secondTableRegions = cluster.getRegions(secondTable); - assertEquals("Number of regions after split should be 2.", 2, secondTableRegions.size()); - } finally { - if (table1 != null) { - table1.close(); - } - if (table2 != null) { - table2.close(); - } - TESTING_UTIL.deleteTable(firstTable); - TESTING_UTIL.deleteTable(secondTable); - } - } - - @Test (timeout=300000) - public void testSSHCleanupDaugtherRegionsOfAbortedSplit() throws Exception { - TableName table = TableName.valueOf("testSSHCleanupDaugtherRegionsOfAbortedSplit"); - try { - HTableDescriptor desc = new HTableDescriptor(table); - desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f"))); - admin.createTable(desc); - Connection connection = ConnectionFactory.createConnection(cluster.getConfiguration()); - Table hTable = connection.getTable(desc.getTableName()); - for(int i = 1; i < 5; i++) { - Put p1 = new Put(("r"+i).getBytes()); - p1.addColumn(Bytes.toBytes("f"), "q1".getBytes(), "v".getBytes()); - hTable.put(p1); - } - admin.flush(desc.getTableName()); - List regions = cluster.getRegions(desc.getTableName()); - int serverWith = cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName()); - HRegionServer regionServer = cluster.getRegionServer(serverWith); - SplitTransactionImpl st = new SplitTransactionImpl(regions.get(0), Bytes.toBytes("r3")); - st.prepare(); - st.stepsBeforePONR(regionServer, regionServer, false); - Path tableDir = - FSUtils.getTableDir(cluster.getMaster().getMasterFileSystem().getRootDir(), - desc.getTableName()); - List regionDirs = - FSUtils.getRegionDirs(tableDir.getFileSystem(cluster.getConfiguration()), tableDir); - assertEquals(3,regionDirs.size()); - regionServer.kill(); - // Before we check deadServerInProgress, we should ensure server is dead at master side. - while (!cluster.getMaster().getServerManager(). 
- getDeadServers().isDeadServer(regionServer.serverName)) { - Thread.sleep(10); - } - // Wait until finish processing of shutdown - while (cluster.getMaster().getServerManager().areDeadServersInProgress()) { - Thread.sleep(10); - } - - AssignmentManager am = cluster.getMaster().getAssignmentManager(); - assertEquals(am.getRegionStates().getRegionsInTransition().toString(), 0, am - .getRegionStates().getRegionsInTransition().size()); - regionDirs = - FSUtils.getRegionDirs(tableDir.getFileSystem(cluster.getConfiguration()), tableDir); - assertEquals(1,regionDirs.size()); - } finally { - TESTING_UTIL.deleteTable(table); - } - } - - private void testSplitBeforeSettingSplittingInZKInternals() throws Exception { - final TableName tableName = TableName.valueOf("testSplitBeforeSettingSplittingInZK"); - try { - // Create table then get the single region for our new table. - createTableAndWait(tableName, Bytes.toBytes("cf")); - - List regions = awaitTableRegions(tableName); - assertTrue("Table not online", cluster.getRegions(tableName).size() != 0); - - int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionInfo() - .getRegionName()); - HRegionServer regionServer = cluster.getRegionServer(regionServerIndex); - final HRegion region = findSplittableRegion(regions); - assertTrue("not able to find a splittable region", region != null); - SplitTransactionImpl st = new MockedSplitTransaction(region, Bytes.toBytes("row2")) { - @Override - public PairOfSameType stepsBeforePONR(final Server server, - final RegionServerServices services, boolean testing) throws IOException { - throw new SplittingNodeCreationFailedException (); - } - }; - try { - st.prepare(); - st.execute(regionServer, regionServer); - } catch (IOException e) { - // check for the specific instance in case the Split failed due to the - // existence of the znode in OPENED state. 
- // This will at least make the test to fail; - assertTrue("Should be instance of CreateSplittingNodeFailedException", - e instanceof SplittingNodeCreationFailedException ); - assertTrue(st.rollback(regionServer, regionServer)); - } - } finally { - TESTING_UTIL.deleteTable(tableName); - } - } - @Test public void testStoreFileReferenceCreationWhenSplitPolicySaysToSkipRangeCheck() throws Exception { @@ -1052,35 +767,6 @@ public class TestSplitTransactionOnCluster { } } - public static class MockedCoordinatedStateManager extends ZkCoordinatedStateManager { - - public void initialize(Server server, HRegion region) { - this.server = server; - this.watcher = server.getZooKeeper(); - } - } - - public static class MockedSplitTransaction extends SplitTransactionImpl { - - private HRegion currentRegion; - public MockedSplitTransaction(HRegion region, byte[] splitrow) { - super(region, splitrow); - this.currentRegion = region; - } - @Override - public boolean rollback(Server server, RegionServerServices services) throws IOException { - if (this.currentRegion.getRegionInfo().getTable().getNameAsString() - .equals("testShouldFailSplitIfZNodeDoesNotExistDueToPrevRollBack")) { - if(secondSplit){ - super.rollback(server, services); - latch.countDown(); - return true; - } - } - return super.rollback(server, services); - } - } - private HRegion findSplittableRegion(final List regions) throws InterruptedException { for (int i = 0; i < 5; ++i) { for (HRegion r: regions) { @@ -1118,14 +804,13 @@ public class TestSplitTransactionOnCluster { private void split(final HRegionInfo hri, final HRegionServer server, final int regionCount) throws IOException, InterruptedException { this.admin.splitRegion(hri.getRegionName()); - for (int i = 0; ProtobufUtil.getOnlineRegions( - server.getRSRpcServices()).size() <= regionCount && i < 300; i++) { + for (int i = 0; this.cluster.getRegions(hri.getTable()).size() <= regionCount && i < 60; i++) { LOG.debug("Waiting on region to split"); - Thread.sleep(100); + Thread.sleep(2000); } assertFalse("Waited too long for split", - ProtobufUtil.getOnlineRegions(server.getRSRpcServices()).size() <= regionCount); + this.cluster.getRegions(hri.getTable()).size() <= regionCount); } /** @@ -1248,14 +933,6 @@ public class TestSplitTransactionOnCluster { return t; } - private static class SplittingNodeCreationFailedException extends IOException { - private static final long serialVersionUID = 1652404976265623004L; - - public SplittingNodeCreationFailedException () { - super(); - } - } - // Make it public so that JVMClusterUtil can access it. 
public static class MyMaster extends HMaster { public MyMaster(Configuration conf, CoordinatedStateManager cp) @@ -1297,61 +974,6 @@ public class TestSplitTransactionOnCluster { } } - public static class MockedRegionObserver extends BaseRegionObserver { - private SplitTransactionImpl st = null; - private PairOfSameType daughterRegions = null; - - @Override - public void preSplitBeforePONR(ObserverContext ctx, - byte[] splitKey, List metaEntries) throws IOException { - RegionCoprocessorEnvironment environment = ctx.getEnvironment(); - HRegionServer rs = (HRegionServer) environment.getRegionServerServices(); - List onlineRegions = - rs.getOnlineRegions(TableName.valueOf("testSplitHooksBeforeAndAfterPONR_2")); - Region region = onlineRegions.get(0); - for (Region r : onlineRegions) { - if (r.getRegionInfo().containsRow(splitKey)) { - region = r; - break; - } - } - st = new SplitTransactionImpl((HRegion) region, splitKey); - if (!st.prepare()) { - LOG.error("Prepare for the table " + region.getTableDesc().getNameAsString() - + " failed. So returning null. "); - ctx.bypass(); - return; - } - ((HRegion)region).forceSplit(splitKey); - daughterRegions = st.stepsBeforePONR(rs, rs, false); - HRegionInfo copyOfParent = new HRegionInfo(region.getRegionInfo()); - copyOfParent.setOffline(true); - copyOfParent.setSplit(true); - // Put for parent - Put putParent = MetaTableAccessor.makePutFromRegionInfo(copyOfParent); - MetaTableAccessor.addDaughtersToPut(putParent, daughterRegions.getFirst().getRegionInfo(), - daughterRegions.getSecond().getRegionInfo()); - metaEntries.add(putParent); - // Puts for daughters - Put putA = MetaTableAccessor.makePutFromRegionInfo( - daughterRegions.getFirst().getRegionInfo()); - Put putB = MetaTableAccessor.makePutFromRegionInfo( - daughterRegions.getSecond().getRegionInfo()); - st.addLocation(putA, rs.getServerName(), 1); - st.addLocation(putB, rs.getServerName(), 1); - metaEntries.add(putA); - metaEntries.add(putB); - } - - @Override - public void preSplitAfterPONR(ObserverContext ctx) - throws IOException { - RegionCoprocessorEnvironment environment = ctx.getEnvironment(); - HRegionServer rs = (HRegionServer) environment.getRegionServerServices(); - st.stepsAfterPONR(rs, rs, daughterRegions, null); - } - } - static class CustomSplitPolicy extends RegionSplitPolicy { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index ef446937587..c4e9f417333 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java @@ -815,28 +815,16 @@ public class TestAccessController extends SecureTestUtil { verifyDenied(action, USER_NONE, USER_RO, USER_GROUP_ADMIN, USER_GROUP_READ, USER_GROUP_CREATE); } - @Test (timeout=180000) - public void testSplit() throws Exception { - AccessTestAction action = new AccessTestAction() { - @Override - public Object run() throws Exception { - ACCESS_CONTROLLER.preSplit(ObserverContext.createAndPrepare(RCP_ENV, null)); - return null; - } - }; - - verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); - verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, - USER_GROUP_WRITE, USER_GROUP_CREATE); - } - @Test (timeout=180000) public void testSplitWithSplitRow() throws Exception { + final TableName tname = 
TableName.valueOf("testSplitWithSplitRow"); + createTestTable(tname); AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preSplit( - ObserverContext.createAndPrepare(RCP_ENV, null), + ACCESS_CONTROLLER.preSplitRegion( + ObserverContext.createAndPrepare(CP_ENV, null), + tname, TEST_ROW); return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java index 32d54b88e50..ad359753960 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java @@ -777,6 +777,18 @@ public class TestWithDisabledAuthorization extends SecureTestUtil { } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + // preSplit + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preSplitRegion( + ObserverContext.createAndPrepare(CP_ENV, null), + TEST_TABLE.getTableName(), + Bytes.toBytes("ss")); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + // preSetUserQuota verifyAllowed(new AccessTestAction() { @Override @@ -873,15 +885,6 @@ public class TestWithDisabledAuthorization extends SecureTestUtil { } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); - // preSplit - verifyAllowed(new AccessTestAction() { - @Override - public Object run() throws Exception { - ACCESS_CONTROLLER.preSplit(ObserverContext.createAndPrepare(RCP_ENV, null)); - return null; - } - }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); - // preGetOp verifyAllowed(new AccessTestAction() { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java index e03a0d5c496..732e387ef3c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java @@ -49,10 +49,10 @@ import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.master.TableLockManager; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.SplitTableRegionProcedure; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.hbase.regionserver.SplitTransactionImpl; -import org.apache.hadoop.hbase.regionserver.SplitTransactionFactory; import org.apache.hadoop.hbase.regionserver.TestEndToEndSplitTransaction; import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; @@ -205,7 +205,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { HConstants.EMPTY_END_ROW, false, false, true); HBaseFsck hbck = doFsck(conf, false); - assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_HDFS }); + assertErrors(hbck, new 
+        HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_HDFS });
 
     doFsck(conf, true);
@@ -236,7 +237,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
     //to report error if .tableinfo is missing.
     HBaseFsck hbck = doFsck(conf, false);
-    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.NO_TABLEINFO_FILE });
+    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+        HBaseFsck.ErrorReporter.ERROR_CODE.NO_TABLEINFO_FILE });
 
     // fix OrphanTable with default .tableinfo (htd not yet cached on master)
     hbck = doFsck(conf, true);
@@ -311,7 +313,9 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
 
     HBaseFsck hbck = doFsck(conf, false);
     assertErrors(hbck,
-        new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.ORPHAN_HDFS_REGION, HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
+        new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+            HBaseFsck.ErrorReporter.ERROR_CODE.ORPHAN_HDFS_REGION,
+            HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
             HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });
 
     // fix the problem.
@@ -349,7 +353,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
       TEST_UTIL.assertRegionOnServer(hriOverlap, server, REGION_ONLINE_TIMEOUT);
 
       HBaseFsck hbck = doFsck(conf, false);
-      assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.OVERLAP_IN_REGION_CHAIN,
+      assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+          HBaseFsck.ErrorReporter.ERROR_CODE.OVERLAP_IN_REGION_CHAIN,
           HBaseFsck.ErrorReporter.ERROR_CODE.OVERLAP_IN_REGION_CHAIN });
       assertEquals(3, hbck.getOverlapGroups(table).size());
       assertEquals(ROWKEYS.length, countRows());
@@ -386,7 +391,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
       admin.enableTable(table);
 
       HBaseFsck hbck = doFsck(conf, false);
-      assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });
+      assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+          HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });
 
       // holes are separate from overlap groups
       assertEquals(0, hbck.getOverlapGroups(table).size());
@@ -449,7 +455,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
       hrs.addToOnlineRegions(r);
 
       HBaseFsck hbck = doFsck(conf, false);
-      assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.SHOULD_NOT_BE_DEPLOYED });
+      assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+          HBaseFsck.ErrorReporter.ERROR_CODE.SHOULD_NOT_BE_DEPLOYED });
 
       // fix this fault
       doFsck(conf, true);
@@ -678,9 +685,12 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
       deleteTableDir(table);
 
       HBaseFsck hbck = doFsck(conf, false);
-      assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_HDFS,
-          HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_HDFS, HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_HDFS,
-          HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_HDFS, HBaseFsck.ErrorReporter.ERROR_CODE.ORPHAN_TABLE_STATE, });
+      assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+          HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_HDFS,
+          HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_HDFS,
+          HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_HDFS,
+          HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_HDFS,
+          HBaseFsck.ErrorReporter.ERROR_CODE.ORPHAN_TABLE_STATE, });
 
       // holes are separate from overlap groups
       assertEquals(0, hbck.getOverlapGroups(table).size());
@@ -705,7 +715,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
 
     // test
     HBaseFsck hbck = doFsck(conf, false);
-    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.NO_VERSION_FILE });
+    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+        HBaseFsck.ErrorReporter.ERROR_CODE.NO_VERSION_FILE });
 
     // fix hbase.version missing
     doFsck(conf, true);
@@ -727,7 +738,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
 
     // test
     HBaseFsck hbck = doFsck(conf, false);
-    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.NO_TABLE_STATE });
+    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+        HBaseFsck.ErrorReporter.ERROR_CODE.NO_TABLE_STATE });
 
     // fix table state missing
     doFsck(conf, true);
@@ -763,14 +775,17 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
         false, true); // don't rm meta
 
     HBaseFsck hbck = doFsck(conf, false);
-    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_HDFS, HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_HDFS });
+    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+        HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_HDFS,
+        HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_HDFS });
 
     // fix hole in table 1
     doFsck(conf, true, table1);
     // check that hole in table 1 fixed
     assertNoErrors(doFsck(conf, false, table1));
     // check that hole in table 2 still there
-    assertErrors(doFsck(conf, false, table2), new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_HDFS });
+    assertErrors(doFsck(conf, false, table2), new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+        HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_HDFS });
 
     // fix hole in table 2
     doFsck(conf, true, table2);
@@ -824,16 +839,19 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
 
       HBaseFsck hbck = doFsck(conf, false);
       assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
-          HBaseFsck.ErrorReporter.ERROR_CODE.LINGERING_SPLIT_PARENT, HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN});
+          HBaseFsck.ErrorReporter.ERROR_CODE.LINGERING_SPLIT_PARENT,
+          HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN});
 
       // regular repair cannot fix lingering split parent
       hbck = doFsck(conf, true);
       assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
-          HBaseFsck.ErrorReporter.ERROR_CODE.LINGERING_SPLIT_PARENT, HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });
+          HBaseFsck.ErrorReporter.ERROR_CODE.LINGERING_SPLIT_PARENT,
+          HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });
       assertFalse(hbck.shouldRerun());
       hbck = doFsck(conf, false);
       assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
-          HBaseFsck.ErrorReporter.ERROR_CODE.LINGERING_SPLIT_PARENT, HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN});
+          HBaseFsck.ErrorReporter.ERROR_CODE.LINGERING_SPLIT_PARENT,
+          HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN});
 
       // fix lingering split parent
       hbck = new HBaseFsck(conf, hbfsckExecutorService);
@@ -897,7 +915,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
      // overlapping regions
       HBaseFsck hbck = doFsck(conf, true, true, false, false, false, true, true, true, false, false, false, null);
-      assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {}); //no LINGERING_SPLIT_PARENT reported
+      // no LINGERING_SPLIT_PARENT reported
+      assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {});
 
      // assert that the split hbase:meta entry is still there.
       Get get = new Get(hri.getRegionName());
@@ -908,8 +927,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
       assertEquals(ROWKEYS.length, countRows());
 
       // assert that we still have the split regions
-      assertEquals(rl.getStartKeys().length, SPLITS.length + 1 + 1); //SPLITS + 1 is # regions
-      // pre-split.
+      //SPLITS + 1 is # regions pre-split.
+      assertEquals(rl.getStartKeys().length, SPLITS.length + 1 + 1);
       assertNoErrors(doFsck(conf, false));
     }
   } finally {
@@ -1023,7 +1042,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
     admin.enableTable(table);
 
     HBaseFsck hbck = doFsck(conf, false);
-    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY });
+    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+        HBaseFsck.ErrorReporter.ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY });
     // fix hole
     doFsck(conf, true);
     // check that hole fixed
@@ -1051,7 +1071,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
         false, true);
 
     HBaseFsck hbck = doFsck(conf, false);
-    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_HDFS });
+    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+        HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_HDFS });
     // fix hole
     doFsck(conf, true);
     // check that hole fixed
@@ -1080,7 +1101,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
     admin.enableTable(table);
 
     HBaseFsck hbck = doFsck(conf, false);
-    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY });
+    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+        HBaseFsck.ErrorReporter.ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY });
     // fix hole
     doFsck(conf, true);
     // check that hole fixed
@@ -1108,7 +1130,9 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
     // verify there is no other errors
     HBaseFsck hbck = doFsck(conf, false);
     assertErrors(hbck,
-        new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.NOT_DEPLOYED, HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });
+        new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+            HBaseFsck.ErrorReporter.ERROR_CODE.NOT_DEPLOYED,
+            HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });
 
     // verify that noHdfsChecking report the same errors
     HBaseFsck fsck = new HBaseFsck(conf, hbfsckExecutorService);
@@ -1118,7 +1142,9 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
     fsck.setCheckHdfs(false);
     fsck.onlineHbck();
     assertErrors(fsck,
-        new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.NOT_DEPLOYED, HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });
+        new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+            HBaseFsck.ErrorReporter.ERROR_CODE.NOT_DEPLOYED,
+            HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });
     fsck.close();
 
     // verify that fixAssignments works fine with noHdfsChecking
@@ -1161,7 +1187,9 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
     // verify there is no other errors
     HBaseFsck hbck = doFsck(conf, false);
     assertErrors(hbck,
-        new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META, HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });
+        new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+            HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META,
+            HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });
 
     // verify that noHdfsChecking report the same errors
     HBaseFsck fsck = new HBaseFsck(conf, hbfsckExecutorService);
@@ -1171,7 +1199,9 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
     fsck.setCheckHdfs(false);
     fsck.onlineHbck();
     assertErrors(fsck,
-        new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META, HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });
+        new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+            HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META,
+            HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });
     fsck.close();
 
     // verify that fixMeta doesn't work with noHdfsChecking
@@ -1185,7 +1215,9 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
     fsck.onlineHbck();
     assertFalse(fsck.shouldRerun());
     assertErrors(fsck,
-        new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META, HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });
+        new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+            HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META,
+            HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });
     fsck.close();
 
     // fix the cluster so other tests won't be impacted
@@ -1225,7 +1257,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
 
     HBaseFsck hbck = doFsck(conf, false);
     assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
-        HBaseFsck.ErrorReporter.ERROR_CODE.ORPHAN_HDFS_REGION, HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
+        HBaseFsck.ErrorReporter.ERROR_CODE.ORPHAN_HDFS_REGION,
+        HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
         HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN});
 
     // verify that noHdfsChecking can't detect ORPHAN_HDFS_REGION
@@ -1235,7 +1268,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
     fsck.setTimeLag(0);
     fsck.setCheckHdfs(false);
     fsck.onlineHbck();
-    assertErrors(fsck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });
+    assertErrors(fsck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+        HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });
     fsck.close();
 
     // verify that fixHdfsHoles doesn't work with noHdfsChecking
@@ -1249,7 +1283,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
       fsck.setFixHdfsOrphans(true);
       fsck.onlineHbck();
       assertFalse(fsck.shouldRerun());
-      assertErrors(fsck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });
+      assertErrors(fsck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+          HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });
       fsck.close();
     } finally {
       if (admin.isTableDisabled(table)) {
@@ -1349,7 +1384,9 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
     TEST_UTIL.assertRegionOnServer(hriDupe, server, REGION_ONLINE_TIMEOUT);
 
     HBaseFsck hbck = doFsck(conf,false);
-    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.DEGENERATE_REGION, HBaseFsck.ErrorReporter.ERROR_CODE.DUPE_STARTKEYS,
+    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+        HBaseFsck.ErrorReporter.ERROR_CODE.DEGENERATE_REGION,
+        HBaseFsck.ErrorReporter.ERROR_CODE.DUPE_STARTKEYS,
         HBaseFsck.ErrorReporter.ERROR_CODE.DUPE_STARTKEYS });
     assertEquals(2, hbck.getOverlapGroups(table).size());
     assertEquals(ROWKEYS.length, countRows());
@@ -1405,13 +1442,15 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
       meta.close();
 
       HBaseFsck hbck = doFsck(conf, false);
-      assertTrue(hbck.getErrors().getErrorList().contains(HBaseFsck.ErrorReporter.ERROR_CODE.EMPTY_META_CELL));
+      assertTrue(hbck.getErrors().getErrorList().contains(
+          HBaseFsck.ErrorReporter.ERROR_CODE.EMPTY_META_CELL));
 
       // fix reference file
       hbck = doFsck(conf, true);
 
       // check that reference file fixed
-      assertFalse(hbck.getErrors().getErrorList().contains(HBaseFsck.ErrorReporter.ERROR_CODE.EMPTY_META_CELL));
+      assertFalse(hbck.getErrors().getErrorList().contains(
+          HBaseFsck.ErrorReporter.ERROR_CODE.EMPTY_META_CELL));
     } finally {
       cleanupTable(table);
     }
@@ -1668,16 +1707,22 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
       List regions = cluster.getRegions(desc.getTableName());
       int serverWith = cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName());
       HRegionServer regionServer = cluster.getRegionServer(serverWith);
-      cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName());
-      SplitTransactionImpl st = (SplitTransactionImpl)
-          new SplitTransactionFactory(TEST_UTIL.getConfiguration())
-          .create(regions.get(0), Bytes.toBytes("r3"));
-      st.prepare();
-      st.stepsBeforePONR(regionServer, regionServer, false);
+      byte[] parentRegionName = regions.get(0).getRegionInfo().getRegionName();
+      cluster.getServerWith(parentRegionName);
+      // Create daughters without adding to META table
+      MasterProcedureEnv env = cluster.getMaster().getMasterProcedureExecutor().getEnvironment();
+      SplitTableRegionProcedure splitR = new SplitTableRegionProcedure(
+        env, desc.getTableName(), regions.get(0).getRegionInfo(), Bytes.toBytes("r3"));
+      splitR.prepareSplitRegion(env);
+      splitR.setRegionStateToSplitting(env);
+      splitR.closeParentRegionForSplit(env);
+      splitR.createDaughterRegions(env);
+
       AssignmentManager am = cluster.getMaster().getAssignmentManager();
       for (RegionState state : am.getRegionStates().getRegionsInTransition()) {
        am.regionOffline(state.getRegion());
       }
+
       Map regionsMap = new HashMap();
       regionsMap.put(regions.get(0).getRegionInfo(), regionServer.getServerName());
       am.assign(regionsMap);
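
Note on the hunk above: it deliberately drives only the first half of the new split procedure (prepareSplitRegion, setRegionStateToSplitting, closeParentRegionForSplit, createDaughterRegions) so that hbck sees a half-done split. For comparison, a complete split can instead be submitted as a single procedure to the master's procedure executor and waited on. The fragment below is an illustrative sketch only, not part of this patch; it assumes a running MiniHBaseCluster named "cluster", the table's TableName "tableName", the parent HRegionInfo "parent", and the test-scope helper class org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.

    // Sketch (assumed setup, not from this patch): run a whole split as one procedure
    // instead of invoking the individual steps by hand as in the hunk above.
    ProcedureExecutor<MasterProcedureEnv> procExec =
        cluster.getMaster().getMasterProcedureExecutor();
    long procId = procExec.submitProcedure(new SplitTableRegionProcedure(
        procExec.getEnvironment(), tableName, parent, Bytes.toBytes("r3")));
    // Block until the procedure completes, then verify it did not fail or roll back.
    ProcedureTestingUtility.waitProcedure(procExec, procId);
    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
    // The parent should now be offline and replaced by two online daughter regions.
    assertEquals(2, cluster.getRegions(tableName).size());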