Revert "HBASE-18229: create new Async Split API to embrace AM v2"

TestShell is failing.

This reverts commit 290fb8965d.
Michael Stack 2017-06-30 03:30:50 -07:00
parent 926781540c
commit 619dcf24f4
18 changed files with 273 additions and 742 deletions
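For orientation (not part of the patch): a minimal sketch, assuming the HBase 2.0-era client API shown in this diff, contrasting the procedure-backed async split the revert removes with the region-server-direct split it restores. The split key value is illustrative only.

```java
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitApiSketch {
  // Removed by this revert: master-driven split backed by SplitTableRegionProcedure,
  // returning a Future the caller can block on.
  static void procedureSplit(Admin admin, byte[] regionName) throws Exception {
    Future<Void> f = admin.splitRegionAsync(regionName, Bytes.toBytes("splitKey"));
    f.get(60, TimeUnit.SECONDS); // wait for the master-side procedure to finish
  }

  // Restored by this revert: the pre-HBASE-18229 call, which dispatches the split
  // to the hosting region server and returns without waiting for completion.
  static void directSplit(Admin admin, byte[] regionName) throws Exception {
    admin.splitRegion(regionName, Bytes.toBytes("splitKey"));
  }
}
```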

View File

@@ -985,8 +985,6 @@ public interface Admin extends Abortable, Closeable {
    *
    * @param regionName region to split
    * @throws IOException if a remote or network exception occurs
-   * @deprecated Since 2.0. Will be removed in 3.0. Use
-   *     {@link #splitRegionAsync(byte[], byte[])} instead.
    */
   void splitRegion(final byte[] regionName) throws IOException;
@@ -1006,21 +1004,10 @@ public interface Admin extends Abortable, Closeable {
    * @param regionName region to split
    * @param splitPoint the explicit position to split on
    * @throws IOException if a remote or network exception occurs
-   * @deprecated Since 2.0. Will be removed in 3.0. Use
-   *     {@link #splitRegionAsync(byte[], byte[])} instead.
    */
   void splitRegion(final byte[] regionName, final byte[] splitPoint)
     throws IOException;
 
-  /**
-   * Split an individual region. Asynchronous operation.
-   * @param regionName region to split
-   * @param splitPoint the explicit position to split on
-   * @throws IOException if a remote or network exception occurs
-   */
-  Future<Void> splitRegionAsync(byte[] regionName, byte[] splitPoint)
-    throws IOException;
-
   /**
    * Modify an existing table, more IRB friendly version.
    *

View File

@@ -160,8 +160,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRe
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
@@ -1104,7 +1102,7 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
         if (hri == null || hri.isSplitParent()
             || hri.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID)
           continue;
-        splitFutures.add(split(hri, Optional.empty()));
+        splitFutures.add(split(h.getServerName(), hri, Optional.empty()));
       }
     }
   }
@@ -1172,7 +1170,7 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
           .toStringBinary(regionName)));
       return;
     }
-    split(regionInfo, splitPoint).whenComplete((ret, err2) -> {
+    split(serverName, regionInfo, splitPoint).whenComplete((ret, err2) -> {
       if (err2 != null) {
         future.completeExceptionally(err2);
       } else {
@@ -1183,36 +1181,21 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
     return future;
   }
 
-  private CompletableFuture<Void> split(final HRegionInfo hri,
+  private CompletableFuture<Void> split(final ServerName sn, final HRegionInfo hri,
       Optional<byte[]> splitPoint) {
     if (hri.getStartKey() != null && splitPoint.isPresent()
         && Bytes.compareTo(hri.getStartKey(), splitPoint.get()) == 0) {
       return failedFuture(new IllegalArgumentException(
         "should not give a splitkey which equals to startkey!"));
     }
-    CompletableFuture<Void> future = new CompletableFuture<>();
-    TableName tableName = hri.getTable();
-    SplitTableRegionRequest request = null;
-    try {
-      request = RequestConverter
-          .buildSplitTableRegionRequest(hri, splitPoint.isPresent() ? splitPoint.get() : null,
-            ng.getNonceGroup(), ng.newNonce());
-    } catch (DeserializationException e) {
-      future.completeExceptionally(e);
-      return future;
-    }
-    this.<SplitTableRegionRequest, SplitTableRegionResponse>procedureCall(request,
-      (s, c, req, done) -> s.splitRegion(c, req, done), (resp) -> resp.getProcId(),
-      new SplitTableRegionProcedureBiConsumer(this, tableName)).whenComplete((ret, err2) -> {
-        if (err2 != null) {
-          future.completeExceptionally(err2);
-        } else {
-          future.complete(ret);
-        }
-      });
-    return future;
+    return this
+        .<Void> newAdminCaller()
+        .action(
+          (controller, stub) -> this.<SplitRegionRequest, SplitRegionResponse, Void> adminCall(
+            controller, stub,
+            ProtobufUtil.buildSplitRegionRequest(hri.getRegionName(), splitPoint),
+            (s, c, req, done) -> s.splitRegion(controller, req, done), resp -> null))
+        .serverName(sn).call();
   }
 
   @Override
@@ -2259,17 +2242,6 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
     }
   }
 
-  private class SplitTableRegionProcedureBiConsumer extends TableProcedureBiConsumer {
-
-    SplitTableRegionProcedureBiConsumer(AsyncAdmin admin, TableName tableName) {
-      super(admin, tableName);
-    }
-
-    String getOperationType() {
-      return "SPLIT_REGION";
-    }
-  }
-
   private CompletableFuture<Void> waitProcedureResult(CompletableFuture<Long> procFuture) {
     CompletableFuture<Void> future = new CompletableFuture<>();
     procFuture.whenComplete((procId, error) -> {
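The net effect of the hunks above is that AsyncHBaseAdmin no longer tracks a master procedure; it wraps a single region-server RPC callback in a CompletableFuture. A self-contained sketch of that shape, using hypothetical stand-in types (AdminStub and its callback signature are illustrative, not the real HBase plumbing):

```java
import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

final class RpcSketch {
  // Hypothetical stand-in for the region server admin stub.
  interface AdminStub {
    void splitRegion(byte[] regionName, byte[] splitPoint, BiConsumer<Void, Throwable> done);
  }

  // Shape of the restored path: issue one RPC to the hosting region server and
  // complete the future when its callback fires -- no master procedure to poll.
  static CompletableFuture<Void> split(AdminStub stub, byte[] regionName, byte[] splitPoint) {
    CompletableFuture<Void> future = new CompletableFuture<>();
    stub.splitRegion(regionName, splitPoint, (resp, err) -> {
      if (err != null) {
        future.completeExceptionally(err);
      } else {
        future.complete(resp); // resp -> null, as in the adminCall lambda above
      }
    });
    return future;
  }
}
```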

View File

@@ -177,8 +177,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormali
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
@@ -1637,97 +1635,6 @@ public class HBaseAdmin implements Admin {
       return "MERGE_REGIONS";
     }
   }
 
-  /**
-   * Split one region. Synchronous operation.
-   * Note: It is not feasible to predict the length of split.
-   *   Therefore, this is for internal testing only.
-   * @param regionName encoded or full name of region
-   * @param splitPoint key where region splits
-   * @throws IOException
-   */
-  @VisibleForTesting
-  public void splitRegionSync(byte[] regionName, byte[] splitPoint) throws IOException {
-    splitRegionSync(regionName, splitPoint, syncWaitTimeout, TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Split one region. Synchronous operation.
-   * @param regionName region to be split
-   * @param splitPoint split point
-   * @param timeout how long to wait on split
-   * @param units time units
-   * @throws IOException
-   */
-  public void splitRegionSync(byte[] regionName, byte[] splitPoint,
-      final long timeout, final TimeUnit units) throws IOException {
-    get(
-        splitRegionAsync(regionName, splitPoint),
-        timeout,
-        units);
-  }
-
-  @Override
-  public Future<Void> splitRegionAsync(byte[] regionName, byte[] splitPoint)
-      throws IOException {
-    byte[] encodedNameofRegionToSplit = HRegionInfo.isEncodedRegionName(regionName) ?
-        regionName : HRegionInfo.encodeRegionName(regionName).getBytes();
-    Pair<HRegionInfo, ServerName> pair = getRegion(regionName);
-    if (pair != null) {
-      if (pair.getFirst() != null &&
-          pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
-        throw new IllegalArgumentException ("Can't invoke split on non-default regions directly");
-      }
-    } else {
-      throw new UnknownRegionException (
-          "Can't invoke merge on unknown region "
-          + Bytes.toStringBinary(encodedNameofRegionToSplit));
-    }
-
-    HRegionInfo hri = pair.getFirst();
-    return splitRegionAsync(hri, splitPoint);
-  }
-
-  Future<Void> splitRegionAsync(HRegionInfo hri, byte[] splitPoint) throws IOException {
-    TableName tableName = hri.getTable();
-    if (hri.getStartKey() != null && splitPoint != null &&
-        Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) {
-      throw new IOException("should not give a splitkey which equals to startkey!");
-    }
-
-    SplitTableRegionResponse response = executeCallable(
-        new MasterCallable<SplitTableRegionResponse>(getConnection(), getRpcControllerFactory()) {
-          @Override
-          protected SplitTableRegionResponse rpcCall() throws Exception {
-            setPriority(tableName);
-            SplitTableRegionRequest request = RequestConverter
-                .buildSplitTableRegionRequest(hri, splitPoint, ng.getNonceGroup(), ng.newNonce());
-            return master.splitRegion(getRpcController(), request);
-          }
-        });
-    return new SplitTableRegionFuture(this, tableName, response);
-  }
-
-  private static class SplitTableRegionFuture extends TableFuture<Void> {
-    public SplitTableRegionFuture(final HBaseAdmin admin,
-        final TableName tableName,
-        final SplitTableRegionResponse response) {
-      super(admin, tableName,
-          (response != null && response.hasProcId()) ? response.getProcId() : null);
-    }
-
-    public SplitTableRegionFuture(
-        final HBaseAdmin admin,
-        final TableName tableName,
-        final Long procId) {
-      super(admin, tableName, procId);
-    }
-
-    @Override
-    public String getOperationType() {
-      return "SPLIT_REGION";
-    }
-  }
-
   @Override
   public void split(final TableName tableName) throws IOException {
@@ -1739,6 +1646,9 @@ public class HBaseAdmin implements Admin {
     splitRegion(regionName, null);
   }
 
+  /**
+   * {@inheritDoc}
+   */
   @Override
   public void split(final TableName tableName, final byte [] splitPoint) throws IOException {
     ZooKeeperWatcher zookeeper = null;
@@ -1752,9 +1662,6 @@ public class HBaseAdmin implements Admin {
       } else {
         pairs = MetaTableAccessor.getTableRegionsAndLocations(connection, tableName);
       }
-      if (splitPoint == null) {
-        LOG.info("SplitPoint is null, will find bestSplitPoint from Region");
-      }
       for (Pair<HRegionInfo, ServerName> pair: pairs) {
         // May not be a server for a particular row
         if (pair.getSecond() == null) continue;
@@ -1764,8 +1671,8 @@ public class HBaseAdmin implements Admin {
         // if a split point given, only split that particular region
         if (r.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID ||
             (splitPoint != null && !r.containsRow(splitPoint))) continue;
-        // call out to master to do split now
-        splitRegionAsync(pair.getFirst(), splitPoint);
+        // call out to region server to do split now
+        split(pair.getSecond(), pair.getFirst(), splitPoint);
       }
     } finally {
       if (zookeeper != null) {
@@ -1788,7 +1695,23 @@ public class HBaseAdmin implements Admin {
     if (regionServerPair.getSecond() == null) {
       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
     }
-    splitRegionAsync(regionServerPair.getFirst(), splitPoint);
+    split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint);
+  }
+
+  @VisibleForTesting
+  public void split(final ServerName sn, final HRegionInfo hri,
+      byte[] splitPoint) throws IOException {
+    if (hri.getStartKey() != null && splitPoint != null &&
+        Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) {
+      throw new IOException("should not give a splitkey which equals to startkey!");
+    }
+    // TODO: There is no timeout on this controller. Set one!
+    HBaseRpcController controller = rpcControllerFactory.newController();
+    controller.setPriority(hri.getTable());
+
+    // TODO: this does not do retries, it should. Set priority and timeout in controller
+    AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
+    ProtobufUtil.split(controller, admin, hri, splitPoint);
   }
 
   @Override
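A hypothetical test-style usage of the restored HBaseAdmin#split(ServerName, HRegionInfo, byte[]) hook, assuming a running cluster; the table name, row, and split key are illustrative only:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class DirectSplitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
      try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("t1"))) {
        HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row"));
        // Fire-and-forget: the hosting region server performs the split.
        admin.split(loc.getServerName(), loc.getRegionInfo(), Bytes.toBytes("splitKey"));
      }
    }
  }
}
```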

View File

@@ -118,8 +118,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleaner
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest;
@@ -784,19 +782,6 @@ public final class RequestConverter {
   public static GetRegionInfoRequest
       buildGetRegionInfoRequest(final byte[] regionName,
         final boolean includeCompactionState) {
-    return buildGetRegionInfoRequest(regionName, includeCompactionState, false);
-  }
-
-  /**
-   *
-   * @param regionName the name of the region to get info
-   * @param includeCompactionState indicate if the compaction state is requested
-   * @param includeBestSplitRow indicate if the bestSplitRow is requested
-   * @return protocol buffer GetRegionInfoRequest
-   */
-  public static GetRegionInfoRequest
-      buildGetRegionInfoRequest(final byte[] regionName,
-        final boolean includeCompactionState, final boolean includeBestSplitRow) {
     GetRegionInfoRequest.Builder builder = GetRegionInfoRequest.newBuilder();
     RegionSpecifier region = buildRegionSpecifier(
       RegionSpecifierType.REGION_NAME, regionName);
@@ -804,13 +789,9 @@ public final class RequestConverter {
     if (includeCompactionState) {
       builder.setCompactionState(includeCompactionState);
     }
-    if(includeBestSplitRow) {
-      builder.setBestSplitRow(includeBestSplitRow);
-    }
     return builder.build();
   }
 
   /**
    * Create a protocol buffer GetRegionLoadRequest for all regions/regions of a table.
    *
@@ -1168,21 +1149,6 @@ public final class RequestConverter {
     return builder.build();
   }
 
-  public static SplitTableRegionRequest buildSplitTableRegionRequest(
-      final HRegionInfo regionInfo,
-      final byte[] splitRow,
-      final long nonceGroup,
-      final long nonce) throws DeserializationException {
-    SplitTableRegionRequest.Builder builder = SplitTableRegionRequest.newBuilder();
-    builder.setRegionInfo(HRegionInfo.convert(regionInfo));
-    if (splitRow != null) {
-      builder.setSplitRow(UnsafeByteOperations.unsafeWrap(splitRow));
-    }
-    builder.setNonceGroup(nonceGroup);
-    builder.setNonce(nonce);
-    return builder.build();
-  }
-
   /**
    * Create a protocol buffer AssignRegionRequest
    *

View File

@@ -39,15 +39,6 @@ public final class AdminProtos {
      * <code>optional bool compaction_state = 2;</code>
      */
     boolean getCompactionState();
-
-    /**
-     * <code>optional bool best_split_row = 3;</code>
-     */
-    boolean hasBestSplitRow();
-    /**
-     * <code>optional bool best_split_row = 3;</code>
-     */
-    boolean getBestSplitRow();
   }
   /**
    * Protobuf type {@code hbase.pb.GetRegionInfoRequest}
@@ -62,7 +53,6 @@ public final class AdminProtos {
     }
     private GetRegionInfoRequest() {
       compactionState_ = false;
-      bestSplitRow_ = false;
     }
 
     @java.lang.Override
@@ -111,11 +101,6 @@ public final class AdminProtos {
               compactionState_ = input.readBool();
               break;
             }
-            case 24: {
-              bitField0_ |= 0x00000004;
-              bestSplitRow_ = input.readBool();
-              break;
-            }
           }
         }
       } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
@@ -177,21 +162,6 @@ public final class AdminProtos {
       return compactionState_;
     }
 
-    public static final int BEST_SPLIT_ROW_FIELD_NUMBER = 3;
-    private boolean bestSplitRow_;
-    /**
-     * <code>optional bool best_split_row = 3;</code>
-     */
-    public boolean hasBestSplitRow() {
-      return ((bitField0_ & 0x00000004) == 0x00000004);
-    }
-    /**
-     * <code>optional bool best_split_row = 3;</code>
-     */
-    public boolean getBestSplitRow() {
-      return bestSplitRow_;
-    }
-
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
       byte isInitialized = memoizedIsInitialized;
@@ -218,9 +188,6 @@ public final class AdminProtos {
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         output.writeBool(2, compactionState_);
       }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        output.writeBool(3, bestSplitRow_);
-      }
       unknownFields.writeTo(output);
     }
@@ -237,10 +204,6 @@ public final class AdminProtos {
         size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
           .computeBoolSize(2, compactionState_);
       }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeBoolSize(3, bestSplitRow_);
-      }
       size += unknownFields.getSerializedSize();
       memoizedSize = size;
       return size;
@@ -268,11 +231,6 @@ public final class AdminProtos {
         result = result && (getCompactionState()
            == other.getCompactionState());
       }
-      result = result && (hasBestSplitRow() == other.hasBestSplitRow());
-      if (hasBestSplitRow()) {
-        result = result && (getBestSplitRow()
-           == other.getBestSplitRow());
-      }
       result = result && unknownFields.equals(other.unknownFields);
       return result;
     }
@@ -293,11 +251,6 @@ public final class AdminProtos {
        hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean(
           getCompactionState());
       }
-      if (hasBestSplitRow()) {
-        hash = (37 * hash) + BEST_SPLIT_ROW_FIELD_NUMBER;
-        hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean(
-           getBestSplitRow());
-      }
       hash = (29 * hash) + unknownFields.hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -425,8 +378,6 @@ public final class AdminProtos {
        bitField0_ = (bitField0_ & ~0x00000001);
        compactionState_ = false;
        bitField0_ = (bitField0_ & ~0x00000002);
-       bestSplitRow_ = false;
-       bitField0_ = (bitField0_ & ~0x00000004);
        return this;
       }
@@ -463,10 +414,6 @@ public final class AdminProtos {
          to_bitField0_ |= 0x00000002;
        }
        result.compactionState_ = compactionState_;
-       if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
-         to_bitField0_ |= 0x00000004;
-       }
-       result.bestSplitRow_ = bestSplitRow_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
@@ -515,9 +462,6 @@ public final class AdminProtos {
        if (other.hasCompactionState()) {
          setCompactionState(other.getCompactionState());
        }
-       if (other.hasBestSplitRow()) {
-         setBestSplitRow(other.getBestSplitRow());
-       }
        this.mergeUnknownFields(other.unknownFields);
        onChanged();
        return this;
@@ -701,38 +645,6 @@ public final class AdminProtos {
        onChanged();
        return this;
      }
-
-      private boolean bestSplitRow_ ;
-      /**
-       * <code>optional bool best_split_row = 3;</code>
-       */
-      public boolean hasBestSplitRow() {
-        return ((bitField0_ & 0x00000004) == 0x00000004);
-      }
-      /**
-       * <code>optional bool best_split_row = 3;</code>
-       */
-      public boolean getBestSplitRow() {
-        return bestSplitRow_;
-      }
-      /**
-       * <code>optional bool best_split_row = 3;</code>
-       */
-      public Builder setBestSplitRow(boolean value) {
-        bitField0_ |= 0x00000004;
-        bestSplitRow_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>optional bool best_split_row = 3;</code>
-       */
-      public Builder clearBestSplitRow() {
-        bitField0_ = (bitField0_ & ~0x00000004);
-        bestSplitRow_ = false;
-        onChanged();
-        return this;
-      }
      public final Builder setUnknownFields(
          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
@@ -850,23 +762,6 @@ public final class AdminProtos {
      * <code>optional bool mergeable = 5;</code>
      */
     boolean getMergeable();
-
-    /**
-     * <pre>
-     * Get bestSplitRow
-     * </pre>
-     *
-     * <code>optional bytes best_split_row = 6;</code>
-     */
-    boolean hasBestSplitRow();
-    /**
-     * <pre>
-     * Get bestSplitRow
-     * </pre>
-     *
-     * <code>optional bytes best_split_row = 6;</code>
-     */
-    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getBestSplitRow();
   }
   /**
    * Protobuf type {@code hbase.pb.GetRegionInfoResponse}
@@ -884,7 +779,6 @@ public final class AdminProtos {
       isRecovering_ = false;
       splittable_ = false;
       mergeable_ = false;
-      bestSplitRow_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY;
     }
 
     @java.lang.Override
@@ -954,11 +848,6 @@ public final class AdminProtos {
               mergeable_ = input.readBool();
               break;
             }
-            case 50: {
-              bitField0_ |= 0x00000020;
-              bestSplitRow_ = input.readBytes();
-              break;
-            }
           }
         }
       } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
@@ -1190,29 +1079,6 @@ public final class AdminProtos {
       return mergeable_;
     }
 
-    public static final int BEST_SPLIT_ROW_FIELD_NUMBER = 6;
-    private org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bestSplitRow_;
-    /**
-     * <pre>
-     * Get bestSplitRow
-     * </pre>
-     *
-     * <code>optional bytes best_split_row = 6;</code>
-     */
-    public boolean hasBestSplitRow() {
-      return ((bitField0_ & 0x00000020) == 0x00000020);
-    }
-    /**
-     * <pre>
-     * Get bestSplitRow
-     * </pre>
-     *
-     * <code>optional bytes best_split_row = 6;</code>
-     */
-    public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getBestSplitRow() {
-      return bestSplitRow_;
-    }
-
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
       byte isInitialized = memoizedIsInitialized;
@@ -1248,9 +1114,6 @@ public final class AdminProtos {
       if (((bitField0_ & 0x00000010) == 0x00000010)) {
         output.writeBool(5, mergeable_);
       }
-      if (((bitField0_ & 0x00000020) == 0x00000020)) {
-        output.writeBytes(6, bestSplitRow_);
-      }
       unknownFields.writeTo(output);
     }
@@ -1279,10 +1142,6 @@ public final class AdminProtos {
        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
          .computeBoolSize(5, mergeable_);
       }
-      if (((bitField0_ & 0x00000020) == 0x00000020)) {
-        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeBytesSize(6, bestSplitRow_);
-      }
       size += unknownFields.getSerializedSize();
       memoizedSize = size;
       return size;
@@ -1324,11 +1183,6 @@ public final class AdminProtos {
        result = result && (getMergeable()
           == other.getMergeable());
       }
-      result = result && (hasBestSplitRow() == other.hasBestSplitRow());
-      if (hasBestSplitRow()) {
-        result = result && getBestSplitRow()
-           .equals(other.getBestSplitRow());
-      }
       result = result && unknownFields.equals(other.unknownFields);
       return result;
     }
@@ -1363,10 +1217,6 @@ public final class AdminProtos {
        hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean(
           getMergeable());
       }
-      if (hasBestSplitRow()) {
-        hash = (37 * hash) + BEST_SPLIT_ROW_FIELD_NUMBER;
-        hash = (53 * hash) + getBestSplitRow().hashCode();
-      }
       hash = (29 * hash) + unknownFields.hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -1500,8 +1350,6 @@ public final class AdminProtos {
        bitField0_ = (bitField0_ & ~0x00000008);
        mergeable_ = false;
        bitField0_ = (bitField0_ & ~0x00000010);
-       bestSplitRow_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY;
-       bitField0_ = (bitField0_ & ~0x00000020);
        return this;
       }
@@ -1550,10 +1398,6 @@ public final class AdminProtos {
          to_bitField0_ |= 0x00000010;
        }
        result.mergeable_ = mergeable_;
-       if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
-         to_bitField0_ |= 0x00000020;
-       }
-       result.bestSplitRow_ = bestSplitRow_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
@@ -1611,9 +1455,6 @@ public final class AdminProtos {
        if (other.hasMergeable()) {
          setMergeable(other.getMergeable());
        }
-       if (other.hasBestSplitRow()) {
-         setBestSplitRow(other.getBestSplitRow());
-       }
        this.mergeUnknownFields(other.unknownFields);
        onChanged();
        return this;
@@ -1929,57 +1770,6 @@ public final class AdminProtos {
        onChanged();
        return this;
      }
-
-      private org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bestSplitRow_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY;
-      /**
-       * <pre>
-       * Get bestSplitRow
-       * </pre>
-       *
-       * <code>optional bytes best_split_row = 6;</code>
-       */
-      public boolean hasBestSplitRow() {
-        return ((bitField0_ & 0x00000020) == 0x00000020);
-      }
-      /**
-       * <pre>
-       * Get bestSplitRow
-       * </pre>
-       *
-       * <code>optional bytes best_split_row = 6;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getBestSplitRow() {
-        return bestSplitRow_;
-      }
-      /**
-       * <pre>
-       * Get bestSplitRow
-       * </pre>
-       *
-       * <code>optional bytes best_split_row = 6;</code>
-       */
-      public Builder setBestSplitRow(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) {
-        if (value == null) {
-          throw new NullPointerException();
-        }
-        bitField0_ |= 0x00000020;
-        bestSplitRow_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <pre>
-       * Get bestSplitRow
-       * </pre>
-       *
-       * <code>optional bytes best_split_row = 6;</code>
-       */
-      public Builder clearBestSplitRow() {
-        bitField0_ = (bitField0_ & ~0x00000020);
-        bestSplitRow_ = getDefaultInstance().getBestSplitRow();
-        onChanged();
-        return this;
-      }
      public final Builder setUnknownFields(
          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
@@ -30431,141 +30221,140 @@ public final class AdminProtos {
     java.lang.String[] descriptorData = {
-      "\n\013Admin.proto\022\010hbase.pb\032\023ClusterStatus.p" +
-      "roto\032\013HBase.proto\032\tWAL.proto\032\013Quota.prot" +
-      "o\"s\n\024GetRegionInfoRequest\022)\n\006region\030\001 \002(" +
-      "\0132\031.hbase.pb.RegionSpecifier\022\030\n\020compacti" +
-      "on_state\030\002 \001(\010\022\026\n\016best_split_row\030\003 \001(\010\"\252" +
-      "\002\n\025GetRegionInfoResponse\022)\n\013region_info\030" +
-      "\001 \002(\0132\024.hbase.pb.RegionInfo\022I\n\020compactio" +
-      "n_state\030\002 \001(\0162/.hbase.pb.GetRegionInfoRe" +
-      "sponse.CompactionState\022\024\n\014isRecovering\030\003" +
-      " \001(\010\022\022\n\nsplittable\030\004 \001(\010\022\021\n\tmergeable\030\005 ",
-      "\001(\010\022\026\n\016best_split_row\030\006 \001(\014\"F\n\017Compactio" +
-      "nState\022\010\n\004NONE\020\000\022\t\n\005MINOR\020\001\022\t\n\005MAJOR\020\002\022\023" +
-      "\n\017MAJOR_AND_MINOR\020\003\"P\n\023GetStoreFileReque" +
-      "st\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpec" +
-      "ifier\022\016\n\006family\030\002 \003(\014\"*\n\024GetStoreFileRes" +
-      "ponse\022\022\n\nstore_file\030\001 \003(\t\"\030\n\026GetOnlineRe" +
-      "gionRequest\"D\n\027GetOnlineRegionResponse\022)" +
-      "\n\013region_info\030\001 \003(\0132\024.hbase.pb.RegionInf" +
-      "o\"\263\002\n\021OpenRegionRequest\022=\n\topen_info\030\001 \003" +
-      "(\0132*.hbase.pb.OpenRegionRequest.RegionOp",
-      "enInfo\022\027\n\017serverStartCode\030\002 \001(\004\022\032\n\022maste" +
-      "r_system_time\030\005 \001(\004\032\251\001\n\016RegionOpenInfo\022$" +
-      "\n\006region\030\001 \002(\0132\024.hbase.pb.RegionInfo\022\037\n\027" +
-      "version_of_offline_node\030\002 \001(\r\022+\n\rfavored" +
-      "_nodes\030\003 \003(\0132\024.hbase.pb.ServerName\022#\n\033op" +
-      "enForDistributedLogReplay\030\004 \001(\010\"\246\001\n\022Open" +
-      "RegionResponse\022F\n\ropening_state\030\001 \003(\0162/." +
-      "hbase.pb.OpenRegionResponse.RegionOpenin" +
-      "gState\"H\n\022RegionOpeningState\022\n\n\006OPENED\020\000" +
-      "\022\022\n\016ALREADY_OPENED\020\001\022\022\n\016FAILED_OPENING\020\002",
-      "\"?\n\023WarmupRegionRequest\022(\n\nregionInfo\030\001 " +
-      "\002(\0132\024.hbase.pb.RegionInfo\"\026\n\024WarmupRegio" +
-      "nResponse\"\313\001\n\022CloseRegionRequest\022)\n\006regi" +
-      "on\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\037\n\027v" +
-      "ersion_of_closing_node\030\002 \001(\r\022\036\n\020transiti" +
-      "on_in_ZK\030\003 \001(\010:\004true\0220\n\022destination_serv" +
-      "er\030\004 \001(\0132\024.hbase.pb.ServerName\022\027\n\017server" +
-      "StartCode\030\005 \001(\004\"%\n\023CloseRegionResponse\022\016" +
-      "\n\006closed\030\001 \002(\010\"y\n\022FlushRegionRequest\022)\n\006" +
-      "region\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022",
-      "\030\n\020if_older_than_ts\030\002 \001(\004\022\036\n\026write_flush" +
-      "_wal_marker\030\003 \001(\010\"_\n\023FlushRegionResponse" +
-      "\022\027\n\017last_flush_time\030\001 \002(\004\022\017\n\007flushed\030\002 \001" +
-      "(\010\022\036\n\026wrote_flush_wal_marker\030\003 \001(\010\"T\n\022Sp" +
-      "litRegionRequest\022)\n\006region\030\001 \002(\0132\031.hbase" +
-      ".pb.RegionSpecifier\022\023\n\013split_point\030\002 \001(\014" +
-      "\"\025\n\023SplitRegionResponse\"`\n\024CompactRegion" +
-      "Request\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Regio" +
-      "nSpecifier\022\r\n\005major\030\002 \001(\010\022\016\n\006family\030\003 \001(" +
-      "\014\"\027\n\025CompactRegionResponse\"\315\001\n\031UpdateFav",
-      "oredNodesRequest\022I\n\013update_info\030\001 \003(\01324." +
-      "hbase.pb.UpdateFavoredNodesRequest.Regio" +
-      "nUpdateInfo\032e\n\020RegionUpdateInfo\022$\n\006regio" +
-      "n\030\001 \002(\0132\024.hbase.pb.RegionInfo\022+\n\rfavored" +
-      "_nodes\030\002 \003(\0132\024.hbase.pb.ServerName\".\n\032Up" +
-      "dateFavoredNodesResponse\022\020\n\010response\030\001 \001" +
-      "(\r\"a\n\010WALEntry\022\035\n\003key\030\001 \002(\0132\020.hbase.pb.W" +
-      "ALKey\022\027\n\017key_value_bytes\030\002 \003(\014\022\035\n\025associ" +
-      "ated_cell_count\030\003 \001(\005\"\242\001\n\030ReplicateWALEn" +
-      "tryRequest\022!\n\005entry\030\001 \003(\0132\022.hbase.pb.WAL",
-      "Entry\022\034\n\024replicationClusterId\030\002 \001(\t\022\"\n\032s" +
-      "ourceBaseNamespaceDirPath\030\003 \001(\t\022!\n\031sourc" +
-      "eHFileArchiveDirPath\030\004 \001(\t\"\033\n\031ReplicateW" +
-      "ALEntryResponse\"\026\n\024RollWALWriterRequest\"" +
-      "0\n\025RollWALWriterResponse\022\027\n\017region_to_fl" +
-      "ush\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006reason" +
-      "\030\001 \002(\t\"\024\n\022StopServerResponse\"\026\n\024GetServe" +
-      "rInfoRequest\"K\n\nServerInfo\022)\n\013server_nam" +
-      "e\030\001 \002(\0132\024.hbase.pb.ServerName\022\022\n\nwebui_p" +
-      "ort\030\002 \001(\r\"B\n\025GetServerInfoResponse\022)\n\013se",
-      "rver_info\030\001 \002(\0132\024.hbase.pb.ServerInfo\"\034\n" +
-      "\032UpdateConfigurationRequest\"\035\n\033UpdateCon" +
-      "figurationResponse\"?\n\024GetRegionLoadReque" +
-      "st\022\'\n\ntable_name\030\001 \001(\0132\023.hbase.pb.TableN" +
-      "ame\"C\n\025GetRegionLoadResponse\022*\n\014region_l" +
-      "oads\030\001 \003(\0132\024.hbase.pb.RegionLoad\"2\n\034Clea" +
-      "rCompactionQueuesRequest\022\022\n\nqueue_name\030\001" +
-      " \003(\t\"\037\n\035ClearCompactionQueuesResponse\"\200\001" +
-      "\n\030ExecuteProceduresRequest\0220\n\013open_regio" +
-      "n\030\001 \003(\0132\033.hbase.pb.OpenRegionRequest\0222\n\014",
-      "close_region\030\002 \003(\0132\034.hbase.pb.CloseRegio" +
-      "nRequest\"\203\001\n\031ExecuteProceduresResponse\0221" +
-      "\n\013open_region\030\001 \003(\0132\034.hbase.pb.OpenRegio" +
-      "nResponse\0223\n\014close_region\030\002 \003(\0132\035.hbase." +
-      "pb.CloseRegionResponse\"\244\001\n\023MergeRegionsR" +
-      "equest\022+\n\010region_a\030\001 \002(\0132\031.hbase.pb.Regi" +
-      "onSpecifier\022+\n\010region_b\030\002 \002(\0132\031.hbase.pb" +
-      ".RegionSpecifier\022\027\n\010forcible\030\003 \001(\010:\005fals" +
-      "e\022\032\n\022master_system_time\030\004 \001(\004\"\026\n\024MergeRe" +
-      "gionsResponse2\216\016\n\014AdminService\022P\n\rGetReg",
-      "ionInfo\022\036.hbase.pb.GetRegionInfoRequest\032" +
-      "\037.hbase.pb.GetRegionInfoResponse\022M\n\014GetS" +
-      "toreFile\022\035.hbase.pb.GetStoreFileRequest\032" +
-      "\036.hbase.pb.GetStoreFileResponse\022V\n\017GetOn" +
-      "lineRegion\022 .hbase.pb.GetOnlineRegionReq" +
-      "uest\032!.hbase.pb.GetOnlineRegionResponse\022" +
-      "G\n\nOpenRegion\022\033.hbase.pb.OpenRegionReque" +
-      "st\032\034.hbase.pb.OpenRegionResponse\022M\n\014Warm" +
-      "upRegion\022\035.hbase.pb.WarmupRegionRequest\032" +
-      "\036.hbase.pb.WarmupRegionResponse\022J\n\013Close",
-      "Region\022\034.hbase.pb.CloseRegionRequest\032\035.h" +
-      "base.pb.CloseRegionResponse\022J\n\013FlushRegi" +
-      "on\022\034.hbase.pb.FlushRegionRequest\032\035.hbase" +
-      ".pb.FlushRegionResponse\022J\n\013SplitRegion\022\034" +
-      ".hbase.pb.SplitRegionRequest\032\035.hbase.pb." +
-      "SplitRegionResponse\022P\n\rCompactRegion\022\036.h" +
-      "base.pb.CompactRegionRequest\032\037.hbase.pb." +
-      "CompactRegionResponse\022\\\n\021ReplicateWALEnt" +
-      "ry\022\".hbase.pb.ReplicateWALEntryRequest\032#" +
-      ".hbase.pb.ReplicateWALEntryResponse\022Q\n\006R",
-      "eplay\022\".hbase.pb.ReplicateWALEntryReques" +
-      "t\032#.hbase.pb.ReplicateWALEntryResponse\022P" +
-      "\n\rRollWALWriter\022\036.hbase.pb.RollWALWriter" +
-      "Request\032\037.hbase.pb.RollWALWriterResponse" +
-      "\022P\n\rGetServerInfo\022\036.hbase.pb.GetServerIn" +
-      "foRequest\032\037.hbase.pb.GetServerInfoRespon" +
-      "se\022G\n\nStopServer\022\033.hbase.pb.StopServerRe" +
-      "quest\032\034.hbase.pb.StopServerResponse\022_\n\022U" +
-      "pdateFavoredNodes\022#.hbase.pb.UpdateFavor" +
-      "edNodesRequest\032$.hbase.pb.UpdateFavoredN",
-      "odesResponse\022b\n\023UpdateConfiguration\022$.hb" +
-      "ase.pb.UpdateConfigurationRequest\032%.hbas" +
-      "e.pb.UpdateConfigurationResponse\022P\n\rGetR" +
-      "egionLoad\022\036.hbase.pb.GetRegionLoadReques" +
-      "t\032\037.hbase.pb.GetRegionLoadResponse\022h\n\025Cl" +
-      "earCompactionQueues\022&.hbase.pb.ClearComp" +
-      "actionQueuesRequest\032\'.hbase.pb.ClearComp" +
-      "actionQueuesResponse\022k\n\026GetSpaceQuotaSna" +
-      "pshots\022\'.hbase.pb.GetSpaceQuotaSnapshots" +
-      "Request\032(.hbase.pb.GetSpaceQuotaSnapshot",
-      "sResponse\022\\\n\021ExecuteProcedures\022\".hbase.p" +
-      "b.ExecuteProceduresRequest\032#.hbase.pb.Ex" +
-      "ecuteProceduresResponse\022M\n\014MergeRegions\022" +
-      "\035.hbase.pb.MergeRegionsRequest\032\036.hbase.p" +
-      "b.MergeRegionsResponseBH\n1org.apache.had" +
-      "oop.hbase.shaded.protobuf.generatedB\013Adm" +
-      "inProtosH\001\210\001\001\240\001\001"
+      "\n\013Admin.proto\022\010hbase.pb\032\023ClusterStatus.p" +
+      "roto\032\013HBase.proto\032\tWAL.proto\032\013Quota.prot" +
+      "o\"[\n\024GetRegionInfoRequest\022)\n\006region\030\001 \002(" +
+      "\0132\031.hbase.pb.RegionSpecifier\022\030\n\020compacti" +
+      "on_state\030\002 \001(\010\"\222\002\n\025GetRegionInfoResponse" +
+      "\022)\n\013region_info\030\001 \002(\0132\024.hbase.pb.RegionI" +
+      "nfo\022I\n\020compaction_state\030\002 \001(\0162/.hbase.pb" +
+      ".GetRegionInfoResponse.CompactionState\022\024" +
+      "\n\014isRecovering\030\003 \001(\010\022\022\n\nsplittable\030\004 \001(\010" +
+      "\022\021\n\tmergeable\030\005 \001(\010\"F\n\017CompactionState\022\010",
+      "\n\004NONE\020\000\022\t\n\005MINOR\020\001\022\t\n\005MAJOR\020\002\022\023\n\017MAJOR_" +
+      "AND_MINOR\020\003\"P\n\023GetStoreFileRequest\022)\n\006re" +
+      "gion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\016\n" +
+      "\006family\030\002 \003(\014\"*\n\024GetStoreFileResponse\022\022\n" +
+      "\nstore_file\030\001 \003(\t\"\030\n\026GetOnlineRegionRequ" +
+      "est\"D\n\027GetOnlineRegionResponse\022)\n\013region" +
+      "_info\030\001 \003(\0132\024.hbase.pb.RegionInfo\"\263\002\n\021Op" +
+      "enRegionRequest\022=\n\topen_info\030\001 \003(\0132*.hba" +
+      "se.pb.OpenRegionRequest.RegionOpenInfo\022\027" +
+      "\n\017serverStartCode\030\002 \001(\004\022\032\n\022master_system",
+      "_time\030\005 \001(\004\032\251\001\n\016RegionOpenInfo\022$\n\006region" +
+      "\030\001 \002(\0132\024.hbase.pb.RegionInfo\022\037\n\027version_" +
+      "of_offline_node\030\002 \001(\r\022+\n\rfavored_nodes\030\003" +
+      " \003(\0132\024.hbase.pb.ServerName\022#\n\033openForDis" +
+      "tributedLogReplay\030\004 \001(\010\"\246\001\n\022OpenRegionRe" +
+      "sponse\022F\n\ropening_state\030\001 \003(\0162/.hbase.pb" +
+      ".OpenRegionResponse.RegionOpeningState\"H" +
+      "\n\022RegionOpeningState\022\n\n\006OPENED\020\000\022\022\n\016ALRE" +
+      "ADY_OPENED\020\001\022\022\n\016FAILED_OPENING\020\002\"?\n\023Warm" +
+      "upRegionRequest\022(\n\nregionInfo\030\001 \002(\0132\024.hb",
+      "ase.pb.RegionInfo\"\026\n\024WarmupRegionRespons" +
+      "e\"\313\001\n\022CloseRegionRequest\022)\n\006region\030\001 \002(\013" +
+      "2\031.hbase.pb.RegionSpecifier\022\037\n\027version_o" +
+      "f_closing_node\030\002 \001(\r\022\036\n\020transition_in_ZK" +
+      "\030\003 \001(\010:\004true\0220\n\022destination_server\030\004 \001(\013" +
+      "2\024.hbase.pb.ServerName\022\027\n\017serverStartCod" +
+      "e\030\005 \001(\004\"%\n\023CloseRegionResponse\022\016\n\006closed" +
+      "\030\001 \002(\010\"y\n\022FlushRegionRequest\022)\n\006region\030\001" +
+      " \002(\0132\031.hbase.pb.RegionSpecifier\022\030\n\020if_ol" +
+      "der_than_ts\030\002 \001(\004\022\036\n\026write_flush_wal_mar",
+      "ker\030\003 \001(\010\"_\n\023FlushRegionResponse\022\027\n\017last" +
+      "_flush_time\030\001 \002(\004\022\017\n\007flushed\030\002 \001(\010\022\036\n\026wr" +
+      "ote_flush_wal_marker\030\003 \001(\010\"T\n\022SplitRegio" +
+      "nRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Regi" +
+      "onSpecifier\022\023\n\013split_point\030\002 \001(\014\"\025\n\023Spli" +
+      "tRegionResponse\"`\n\024CompactRegionRequest\022" +
+      ")\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifi" +
+      "er\022\r\n\005major\030\002 \001(\010\022\016\n\006family\030\003 \001(\014\"\027\n\025Com" +
+      "pactRegionResponse\"\315\001\n\031UpdateFavoredNode" +
+      "sRequest\022I\n\013update_info\030\001 \003(\01324.hbase.pb",
+      ".UpdateFavoredNodesRequest.RegionUpdateI" +
+      "nfo\032e\n\020RegionUpdateInfo\022$\n\006region\030\001 \002(\0132" +
+      "\024.hbase.pb.RegionInfo\022+\n\rfavored_nodes\030\002" +
+      " \003(\0132\024.hbase.pb.ServerName\".\n\032UpdateFavo" +
+      "redNodesResponse\022\020\n\010response\030\001 \001(\r\"a\n\010WA" +
+      "LEntry\022\035\n\003key\030\001 \002(\0132\020.hbase.pb.WALKey\022\027\n" +
+      "\017key_value_bytes\030\002 \003(\014\022\035\n\025associated_cel" +
+      "l_count\030\003 \001(\005\"\242\001\n\030ReplicateWALEntryReque" +
+      "st\022!\n\005entry\030\001 \003(\0132\022.hbase.pb.WALEntry\022\034\n" +
+      "\024replicationClusterId\030\002 \001(\t\022\"\n\032sourceBas",
+      "eNamespaceDirPath\030\003 \001(\t\022!\n\031sourceHFileAr" +
+      "chiveDirPath\030\004 \001(\t\"\033\n\031ReplicateWALEntryR" +
+      "esponse\"\026\n\024RollWALWriterRequest\"0\n\025RollW" +
+      "ALWriterResponse\022\027\n\017region_to_flush\030\001 \003(" +
+      "\014\"#\n\021StopServerRequest\022\016\n\006reason\030\001 \002(\t\"\024" +
+      "\n\022StopServerResponse\"\026\n\024GetServerInfoReq" +
+      "uest\"K\n\nServerInfo\022)\n\013server_name\030\001 \002(\0132" +
+      "\024.hbase.pb.ServerName\022\022\n\nwebui_port\030\002 \001(" +
+      "\r\"B\n\025GetServerInfoResponse\022)\n\013server_inf" +
+      "o\030\001 \002(\0132\024.hbase.pb.ServerInfo\"\034\n\032UpdateC",
+      "onfigurationRequest\"\035\n\033UpdateConfigurati" +
+      "onResponse\"?\n\024GetRegionLoadRequest\022\'\n\nta" +
+      "ble_name\030\001 \001(\0132\023.hbase.pb.TableName\"C\n\025G" +
+      "etRegionLoadResponse\022*\n\014region_loads\030\001 \003" +
+      "(\0132\024.hbase.pb.RegionLoad\"2\n\034ClearCompact" +
+      "ionQueuesRequest\022\022\n\nqueue_name\030\001 \003(\t\"\037\n\035" +
+      "ClearCompactionQueuesResponse\"\200\001\n\030Execut" +
+      "eProceduresRequest\0220\n\013open_region\030\001 \003(\0132" +
+      "\033.hbase.pb.OpenRegionRequest\0222\n\014close_re" +
+      "gion\030\002 \003(\0132\034.hbase.pb.CloseRegionRequest",
+      "\"\203\001\n\031ExecuteProceduresResponse\0221\n\013open_r" +
+      "egion\030\001 \003(\0132\034.hbase.pb.OpenRegionRespons" +
+      "e\0223\n\014close_region\030\002 \003(\0132\035.hbase.pb.Close" +
+      "RegionResponse\"\244\001\n\023MergeRegionsRequest\022+" +
+      "\n\010region_a\030\001 \002(\0132\031.hbase.pb.RegionSpecif" +
+      "ier\022+\n\010region_b\030\002 \002(\0132\031.hbase.pb.RegionS" +
+      "pecifier\022\027\n\010forcible\030\003 \001(\010:\005false\022\032\n\022mas" +
+      "ter_system_time\030\004 \001(\004\"\026\n\024MergeRegionsRes" +
+      "ponse2\216\016\n\014AdminService\022P\n\rGetRegionInfo\022" +
+      "\036.hbase.pb.GetRegionInfoRequest\032\037.hbase.",
+      "pb.GetRegionInfoResponse\022M\n\014GetStoreFile" +
+      "\022\035.hbase.pb.GetStoreFileRequest\032\036.hbase." +
+      "pb.GetStoreFileResponse\022V\n\017GetOnlineRegi" +
+      "on\022 .hbase.pb.GetOnlineRegionRequest\032!.h" +
+      "base.pb.GetOnlineRegionResponse\022G\n\nOpenR" +
+      "egion\022\033.hbase.pb.OpenRegionRequest\032\034.hba" +
+      "se.pb.OpenRegionResponse\022M\n\014WarmupRegion" +
+      "\022\035.hbase.pb.WarmupRegionRequest\032\036.hbase." +
+      "pb.WarmupRegionResponse\022J\n\013CloseRegion\022\034" +
+      ".hbase.pb.CloseRegionRequest\032\035.hbase.pb.",
+      "CloseRegionResponse\022J\n\013FlushRegion\022\034.hba" +
+      "se.pb.FlushRegionRequest\032\035.hbase.pb.Flus" +
+      "hRegionResponse\022J\n\013SplitRegion\022\034.hbase.p" +
+      "b.SplitRegionRequest\032\035.hbase.pb.SplitReg" +
+      "ionResponse\022P\n\rCompactRegion\022\036.hbase.pb." +
+      "CompactRegionRequest\032\037.hbase.pb.CompactR" +
+      "egionResponse\022\\\n\021ReplicateWALEntry\022\".hba" +
+      "se.pb.ReplicateWALEntryRequest\032#.hbase.p" +
+      "b.ReplicateWALEntryResponse\022Q\n\006Replay\022\"." +
+      "hbase.pb.ReplicateWALEntryRequest\032#.hbas",
+      "e.pb.ReplicateWALEntryResponse\022P\n\rRollWA" +
+      "LWriter\022\036.hbase.pb.RollWALWriterRequest\032" +
+      "\037.hbase.pb.RollWALWriterResponse\022P\n\rGetS" +
+      "erverInfo\022\036.hbase.pb.GetServerInfoReques" +
+      "t\032\037.hbase.pb.GetServerInfoResponse\022G\n\nSt" +
+      "opServer\022\033.hbase.pb.StopServerRequest\032\034." +
+      "hbase.pb.StopServerResponse\022_\n\022UpdateFav" +
+      "oredNodes\022#.hbase.pb.UpdateFavoredNodesR" +
+      "equest\032$.hbase.pb.UpdateFavoredNodesResp" +
+      "onse\022b\n\023UpdateConfiguration\022$.hbase.pb.U",
+      "pdateConfigurationRequest\032%.hbase.pb.Upd" +
+      "ateConfigurationResponse\022P\n\rGetRegionLoa" +
+      "d\022\036.hbase.pb.GetRegionLoadRequest\032\037.hbas" +
+      "e.pb.GetRegionLoadResponse\022h\n\025ClearCompa" +
+      "ctionQueues\022&.hbase.pb.ClearCompactionQu" +
+      "euesRequest\032\'.hbase.pb.ClearCompactionQu" +
+      "euesResponse\022k\n\026GetSpaceQuotaSnapshots\022\'" +
+      ".hbase.pb.GetSpaceQuotaSnapshotsRequest\032" +
+      "(.hbase.pb.GetSpaceQuotaSnapshotsRespons" +
+      "e\022\\\n\021ExecuteProcedures\022\".hbase.pb.Execut",
+      "eProceduresRequest\032#.hbase.pb.ExecutePro" +
+      "ceduresResponse\022M\n\014MergeRegions\022\035.hbase." +
+      "pb.MergeRegionsRequest\032\036.hbase.pb.MergeR" +
+      "egionsResponseBH\n1org.apache.hadoop.hbas" +
+      "e.shaded.protobuf.generatedB\013AdminProtos" +
+      "H\001\210\001\001\240\001\001"
     };
     org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
         new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -30588,13 +30377,13 @@ public final class AdminProtos {
     internal_static_hbase_pb_GetRegionInfoRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_GetRegionInfoRequest_descriptor,
-        new java.lang.String[] { "Region", "CompactionState", "BestSplitRow", });
+        new java.lang.String[] { "Region", "CompactionState", });
     internal_static_hbase_pb_GetRegionInfoResponse_descriptor =
       getDescriptor().getMessageTypes().get(1);
     internal_static_hbase_pb_GetRegionInfoResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_GetRegionInfoResponse_descriptor,
-        new java.lang.String[] { "RegionInfo", "CompactionState", "IsRecovering", "Splittable", "Mergeable", "BestSplitRow", });
+        new java.lang.String[] { "RegionInfo", "CompactionState", "IsRecovering", "Splittable", "Mergeable", });
     internal_static_hbase_pb_GetStoreFileRequest_descriptor =
       getDescriptor().getMessageTypes().get(2);
     internal_static_hbase_pb_GetStoreFileRequest_fieldAccessorTable = new

View File

@@ -22985,7 +22985,7 @@ public final class ClientProtos {
        * <code>optional .hbase.pb.Cursor cursor = 12;</code>
        */
       private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CursorOrBuilder>
          getCursorFieldBuilder() {
        if (cursorBuilder_ == null) {
          cursorBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -41831,7 +41831,7 @@ public final class ClientProtos {
       internal_static_hbase_pb_ScanRequest_fieldAccessorTable;
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_Cursor_descriptor;
   private static final
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_Cursor_fieldAccessorTable;
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor

View File

@@ -11552,11 +11552,11 @@ public final class MasterProtos {
     org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder();
     /**
-     * <code>optional bytes split_row = 2;</code>
+     * <code>required bytes split_row = 2;</code>
      */
     boolean hasSplitRow();
     /**
-     * <code>optional bytes split_row = 2;</code>
+     * <code>required bytes split_row = 2;</code>
      */
     org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitRow();
@@ -11700,13 +11700,13 @@ public final class MasterProtos {
     public static final int SPLIT_ROW_FIELD_NUMBER = 2;
     private org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString splitRow_;
     /**
-     * <code>optional bytes split_row = 2;</code>
+     * <code>required bytes split_row = 2;</code>
      */
     public boolean hasSplitRow() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
     }
     /**
-     * <code>optional bytes split_row = 2;</code>
+     * <code>required bytes split_row = 2;</code>
      */
     public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitRow() {
       return splitRow_;
@@ -11752,6 +11752,10 @@ public final class MasterProtos {
         memoizedIsInitialized = 0;
         return false;
       }
+      if (!hasSplitRow()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
       if (!getRegionInfo().isInitialized()) {
         memoizedIsInitialized = 0;
         return false;
@@ -12102,6 +12106,9 @@ public final class MasterProtos {
         if (!hasRegionInfo()) {
           return false;
         }
+        if (!hasSplitRow()) {
+          return false;
+        }
         if (!getRegionInfo().isInitialized()) {
           return false;
         }
@@ -12247,19 +12254,19 @@ public final class MasterProtos {
       private org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString splitRow_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY;
       /**
-       * <code>optional bytes split_row = 2;</code>
+       * <code>required bytes split_row = 2;</code>
        */
       public boolean hasSplitRow() {
         return ((bitField0_ & 0x00000002) == 0x00000002);
       }
       /**
-       * <code>optional bytes split_row = 2;</code>
+       * <code>required bytes split_row = 2;</code>
        */
       public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSplitRow() {
         return splitRow_;
       }
       /**
-       * <code>optional bytes split_row = 2;</code>
+       * <code>required bytes split_row = 2;</code>
        */
       public Builder setSplitRow(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) {
         if (value == null) {
@@ -12271,7 +12278,7 @@ public final class MasterProtos {
         return this;
       }
       /**
-       * <code>optional bytes split_row = 2;</code>
+       * <code>required bytes split_row = 2;</code>
        */
       public Builder clearSplitRow() {
         bitField0_ = (bitField0_ & ~0x00000002);
@@ -81236,7 +81243,7 @@ public final class MasterProtos {
       ".pb.RegionSpecifier\"\027\n\025OfflineRegionResp",
       "onse\"\201\001\n\027SplitTableRegionRequest\022)\n\013regi" +
       "on_info\030\001 \002(\0132\024.hbase.pb.RegionInfo\022\021\n\ts" +
-      "plit_row\030\002 \001(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020" +
+      "plit_row\030\002 \002(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020" +
      "\n\005nonce\030\004 \001(\004:\0010\"+\n\030SplitTableRegionResp" +
      "onse\022\017\n\007proc_id\030\001 \001(\004\"\177\n\022CreateTableRequ" +
      "est\022+\n\014table_schema\030\001 \002(\0132\025.hbase.pb.Tab" +


@@ -33,7 +33,6 @@ import "Quota.proto";
message GetRegionInfoRequest { message GetRegionInfoRequest {
required RegionSpecifier region = 1; required RegionSpecifier region = 1;
optional bool compaction_state = 2; optional bool compaction_state = 2;
optional bool best_split_row = 3;
} }
message GetRegionInfoResponse { message GetRegionInfoResponse {
@@ -44,8 +43,6 @@ message GetRegionInfoResponse {
optional bool splittable = 4; optional bool splittable = 4;
// True if region is mergeable, false otherwise. // True if region is mergeable, false otherwise.
optional bool mergeable = 5; optional bool mergeable = 5;
// Get bestSplitRow
optional bytes best_split_row = 6;
enum CompactionState { enum CompactionState {
NONE = 0; NONE = 0;


@@ -136,7 +136,7 @@ message OfflineRegionResponse {
message SplitTableRegionRequest { message SplitTableRegionRequest {
required RegionInfo region_info = 1; required RegionInfo region_info = 1;
optional bytes split_row = 2; required bytes split_row = 2;
optional uint64 nonce_group = 3 [default = 0]; optional uint64 nonce_group = 3 [default = 0];
optional uint64 nonce = 4 [default = 0]; optional uint64 nonce = 4 [default = 0];
} }


@@ -636,7 +636,7 @@ public class MasterRpcServices extends RSRpcServices
try { try {
long procId = master.splitRegion( long procId = master.splitRegion(
HRegionInfo.convert(request.getRegionInfo()), HRegionInfo.convert(request.getRegionInfo()),
request.hasSplitRow() ? request.getSplitRow().toByteArray() : null, request.getSplitRow().toByteArray(),
request.getNonceGroup(), request.getNonceGroup(),
request.getNonce()); request.getNonce());
return SplitTableRegionResponse.newBuilder().setProcId(procId).build(); return SplitTableRegionResponse.newBuilder().setProcId(procId).build();


@@ -67,7 +67,6 @@ import org.apache.hadoop.hbase.master.assignment.RegionStates.ServerState;
import org.apache.hadoop.hbase.master.assignment.RegionStates.ServerStateNode; import org.apache.hadoop.hbase.master.assignment.RegionStates.ServerStateNode;
// TODO: why are they here? // TODO: why are they here?
import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType; import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler; import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait; import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
@@ -158,8 +157,6 @@ public class AssignmentManager implements ServerListener {
// TODO: why is this different from the listeners (carried over from the old AM) // TODO: why is this different from the listeners (carried over from the old AM)
private RegionStateListener regionStateListener; private RegionStateListener regionStateListener;
private RegionNormalizer regionNormalizer;
private final MetricsAssignmentManager metrics; private final MetricsAssignmentManager metrics;
private final RegionInTransitionChore ritChore; private final RegionInTransitionChore ritChore;
private final MasterServices master; private final MasterServices master;
@@ -201,9 +198,6 @@ public class AssignmentManager implements ServerListener {
int ritChoreInterval = conf.getInt(RIT_CHORE_INTERVAL_MSEC_CONF_KEY, int ritChoreInterval = conf.getInt(RIT_CHORE_INTERVAL_MSEC_CONF_KEY,
DEFAULT_RIT_CHORE_INTERVAL_MSEC); DEFAULT_RIT_CHORE_INTERVAL_MSEC);
this.ritChore = new RegionInTransitionChore(ritChoreInterval); this.ritChore = new RegionInTransitionChore(ritChoreInterval);
// Used for region related procedure.
setRegionNormalizer(master.getRegionNormalizer());
} }
public void start() throws IOException { public void start() throws IOException {
@@ -307,14 +301,6 @@ public class AssignmentManager implements ServerListener {
this.regionStateListener = listener; this.regionStateListener = listener;
} }
public void setRegionNormalizer(final RegionNormalizer normalizer) {
this.regionNormalizer = normalizer;
}
public RegionNormalizer getRegionNormalizer() {
return regionNormalizer;
}
public RegionStates getRegionStates() { public RegionStates getRegionStates() {
return regionStates; return regionStates;
} }
@@ -775,6 +761,16 @@ public class AssignmentManager implements ServerListener {
" hriA=" + hriA + " hriB=" + hriB); " hriA=" + hriA + " hriB=" + hriB);
} }
try {
if (regionStateListener != null) {
regionStateListener.onRegionSplit(parent);
}
} catch (QuotaExceededException e) {
// TODO: does this really belong here?
master.getRegionNormalizer().planSkipped(parent, PlanType.SPLIT);
throw e;
}
// Submit the Split procedure // Submit the Split procedure
final byte[] splitKey = hriB.getStartKey(); final byte[] splitKey = hriB.getStartKey();
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
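The try/catch restored above re-couples quota enforcement to the normalizer: the RegionStateListener (in practice the MasterQuotaManager) may veto the split, and on veto the RegionNormalizer is told its SPLIT plan was skipped before the exception is rethrown. A hedged sketch of the veto side, with namespaceQuotaAllowsOneMoreRegion as a hypothetical stand-in for the real quota bookkeeping:

// Listener callback as declared by RegionStateListener; the body is a
// simplification of what MasterQuotaManager does, not a copy of it.
public void onRegionSplit(HRegionInfo hri) throws IOException {
  if (!namespaceQuotaAllowsOneMoreRegion(hri.getTable())) {  // hypothetical helper
    // Surfaces in AssignmentManager above, which then calls
    // master.getRegionNormalizer().planSkipped(parent, PlanType.SPLIT)
    // and rethrows, so the split procedure is never submitted.
    throw new QuotaExceededException("region quota exhausted for "
        + hri.getTable().getNamespaceAsString());
  }
}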


@@ -49,14 +49,12 @@ import org.apache.hadoop.hbase.master.CatalogJanitor;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineTableProcedure; import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineTableProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureYieldException; import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
import org.apache.hadoop.hbase.procedure2.ProcedureMetrics; import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
import org.apache.hadoop.hbase.quotas.QuotaExceededException;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
@@ -531,13 +529,7 @@ public class MergeTableRegionsProcedure
} }
} }
// TODO: Clean up split and merge. Currently all over the place. // TODO: Clean up split and merge. Currently all over the place.
try { env.getMasterServices().getMasterQuotaManager().onRegionMerged(this.mergedRegion);
env.getMasterServices().getMasterQuotaManager().onRegionMerged(this.mergedRegion);
} catch (QuotaExceededException e) {
env.getAssignmentManager().getRegionNormalizer().planSkipped(this.mergedRegion,
NormalizationPlan.PlanType.MERGE);
throw e;
}
} }
/** /**


@@ -54,12 +54,10 @@ import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode; import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineRegionProcedure; import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineRegionProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
import org.apache.hadoop.hbase.procedure2.ProcedureMetrics; import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
import org.apache.hadoop.hbase.quotas.QuotaExceededException;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
@@ -86,7 +84,6 @@ public class SplitTableRegionProcedure
private Boolean traceEnabled = null; private Boolean traceEnabled = null;
private HRegionInfo daughter_1_HRI; private HRegionInfo daughter_1_HRI;
private HRegionInfo daughter_2_HRI; private HRegionInfo daughter_2_HRI;
private byte[] bestSplitRow;
public SplitTableRegionProcedure() { public SplitTableRegionProcedure() {
// Required by the Procedure framework to create the procedure on replay // Required by the Procedure framework to create the procedure on replay
@@ -95,70 +92,27 @@ public class SplitTableRegionProcedure
public SplitTableRegionProcedure(final MasterProcedureEnv env, public SplitTableRegionProcedure(final MasterProcedureEnv env,
final HRegionInfo regionToSplit, final byte[] splitRow) throws IOException { final HRegionInfo regionToSplit, final byte[] splitRow) throws IOException {
super(env, regionToSplit); super(env, regionToSplit);
this.bestSplitRow = splitRow;
checkSplittable(env, regionToSplit, bestSplitRow); checkSplitRow(regionToSplit, splitRow);
final TableName table = regionToSplit.getTable(); final TableName table = regionToSplit.getTable();
final long rid = getDaughterRegionIdTimestamp(regionToSplit); final long rid = getDaughterRegionIdTimestamp(regionToSplit);
this.daughter_1_HRI = new HRegionInfo(table, regionToSplit.getStartKey(), bestSplitRow, false, rid); this.daughter_1_HRI = new HRegionInfo(table, regionToSplit.getStartKey(), splitRow, false, rid);
this.daughter_2_HRI = new HRegionInfo(table, bestSplitRow, regionToSplit.getEndKey(), false, rid); this.daughter_2_HRI = new HRegionInfo(table, splitRow, regionToSplit.getEndKey(), false, rid);
} }
/** private static void checkSplitRow(final HRegionInfo regionToSplit, final byte[] splitRow)
* Check whether the region is splittable throws IOException {
* @param env MasterProcedureEnv if (splitRow == null || splitRow.length == 0) {
* @param regionToSplit parent Region to be split throw new DoNotRetryIOException("Split row cannot be null");
* @param splitRow if splitRow is not specified, will first try to get bestSplitRow from RS
* @throws IOException
*/
private void checkSplittable(final MasterProcedureEnv env,
final HRegionInfo regionToSplit, final byte[] splitRow) throws IOException {
// Ask the remote RS if this region is splittable.
// If we get an IOE, report it along w/ the failure so can see why we are not splittable at this time.
if(regionToSplit.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
throw new IllegalArgumentException ("Can't invoke split on non-default regions directly");
}
RegionStateNode node =
env.getAssignmentManager().getRegionStates().getRegionNode(getParentRegion());
IOException splittableCheckIOE = null;
boolean splittable = false;
if (node != null) {
try {
GetRegionInfoResponse response = null;
if (bestSplitRow == null || bestSplitRow.length == 0) {
LOG.info("splitKey isn't explicitly specified, " + " will try to find a best split key from RS");
response =
Util.getRegionInfoResponse(env, node.getRegionLocation(), node.getRegionInfo(), true);
bestSplitRow =
response.hasBestSplitRow() ? response.getBestSplitRow().toByteArray() : null;
} else {
response = Util.getRegionInfoResponse(env, node.getRegionLocation(), node.getRegionInfo(), false);
}
splittable = response.hasSplittable() && response.getSplittable();
if (LOG.isDebugEnabled()) {
LOG.debug("Splittable=" + splittable + " " + node.toShortString());
}
} catch (IOException e) {
splittableCheckIOE = e;
}
} }
if (!splittable) { if (Bytes.equals(regionToSplit.getStartKey(), splitRow)) {
IOException e = new IOException(regionToSplit.getShortNameToLog() + " NOT splittable");
if (splittableCheckIOE != null) e.initCause(splittableCheckIOE);
throw e;
}
if(bestSplitRow == null || bestSplitRow.length == 0) {
throw new DoNotRetryIOException("Region not splittable because bestSplitPoint = null");
}
if (Bytes.equals(regionToSplit.getStartKey(), bestSplitRow)) {
throw new DoNotRetryIOException( throw new DoNotRetryIOException(
"Split row is equal to startkey: " + Bytes.toStringBinary(splitRow)); "Split row is equal to startkey: " + Bytes.toStringBinary(splitRow));
} }
if (!regionToSplit.containsRow(bestSplitRow)) { if (!regionToSplit.containsRow(splitRow)) {
throw new DoNotRetryIOException( throw new DoNotRetryIOException(
"Split row is not inside region key range splitKey:" + Bytes.toStringBinary(splitRow) + "Split row is not inside region key range splitKey:" + Bytes.toStringBinary(splitRow) +
" region: " + regionToSplit); " region: " + regionToSplit);
@@ -244,7 +198,6 @@ public class SplitTableRegionProcedure
setFailure(e); setFailure(e);
} }
} }
// if split fails, need to call ((HRegion)parent).clearSplit() when it is a force split
return Flow.HAS_MORE_STATE; return Flow.HAS_MORE_STATE;
} }
@@ -414,6 +367,27 @@ public class SplitTableRegionProcedure
Arrays.toString(EXPECTED_SPLIT_STATES))); Arrays.toString(EXPECTED_SPLIT_STATES)));
return false; return false;
} }
// Ask the remote regionserver if this region is splittable. If we get an IOE, report it
// along w/ the failure so can see why we are not splittable at this time.
IOException splittableCheckIOE = null;
boolean splittable = false;
try {
GetRegionInfoResponse response =
Util.getRegionInfoResponse(env, node.getRegionLocation(), node.getRegionInfo());
splittable = response.hasSplittable() && response.getSplittable();
if (LOG.isDebugEnabled()) {
LOG.debug("Splittable=" + splittable + " " + this + " " + node.toShortString());
}
} catch (IOException e) {
splittableCheckIOE = e;
}
if (!splittable) {
IOException e = new IOException(parentHRI.getShortNameToLog() + " NOT splittable");
if (splittableCheckIOE != null) e.initCause(splittableCheckIOE);
setFailure(e);
return false;
}
} }
// Since we have the lock and the master is coordinating the operation // Since we have the lock and the master is coordinating the operation
@@ -440,16 +414,6 @@ public class SplitTableRegionProcedure
if (cpHost != null) { if (cpHost != null) {
cpHost.preSplitRegionAction(getTableName(), getSplitRow(), getUser()); cpHost.preSplitRegionAction(getTableName(), getSplitRow(), getUser());
} }
// TODO: Clean up split and merge. Currently all over the place.
// Notify QuotaManager and RegionNormalizer
try {
env.getMasterServices().getMasterQuotaManager().onRegionSplit(this.getParentRegion());
} catch (QuotaExceededException e) {
env.getAssignmentManager().getRegionNormalizer().planSkipped(this.getParentRegion(),
NormalizationPlan.PlanType.SPLIT);
throw e;
}
} }
/** /**


@@ -44,24 +44,13 @@ class Util {
*/ */
static GetRegionInfoResponse getRegionInfoResponse(final MasterProcedureEnv env, static GetRegionInfoResponse getRegionInfoResponse(final MasterProcedureEnv env,
final ServerName regionLocation, final HRegionInfo hri) final ServerName regionLocation, final HRegionInfo hri)
throws IOException {
return getRegionInfoResponse(env, regionLocation, hri, false);
}
static GetRegionInfoResponse getRegionInfoResponse(final MasterProcedureEnv env,
final ServerName regionLocation, final HRegionInfo hri, boolean includeBestSplitRow)
throws IOException { throws IOException {
// TODO: There is no timeout on this controller. Set one! // TODO: There is no timeout on this controller. Set one!
HBaseRpcController controller = env.getMasterServices().getClusterConnection(). HBaseRpcController controller = env.getMasterServices().getClusterConnection().
getRpcControllerFactory().newController(); getRpcControllerFactory().newController();
final AdminService.BlockingInterface admin = final AdminService.BlockingInterface admin =
env.getMasterServices().getClusterConnection().getAdmin(regionLocation); env.getMasterServices().getClusterConnection().getAdmin(regionLocation);
GetRegionInfoRequest request = null; GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(hri.getRegionName());
if (includeBestSplitRow) {
request = RequestConverter.buildGetRegionInfoRequest(hri.getRegionName(), false, true);
} else {
request = RequestConverter.buildGetRegionInfoRequest(hri.getRegionName());
}
try { try {
return admin.getRegionInfo(controller, request); return admin.getRegionInfo(controller, request);
} catch (ServiceException e) { } catch (ServiceException e) {


@@ -1673,20 +1673,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
requestCount.increment(); requestCount.increment();
Region region = getRegion(request.getRegion()); Region region = getRegion(request.getRegion());
HRegionInfo info = region.getRegionInfo(); HRegionInfo info = region.getRegionInfo();
byte[] bestSplitRow = null;
if (request.hasBestSplitRow() && request.getBestSplitRow()) {
HRegion r = (HRegion) region;
region.startRegionOperation(Operation.SPLIT_REGION);
r.forceSplit(null);
bestSplitRow = r.checkSplit();
// when all table data are in memstore, bestSplitRow = null
// try to flush region first
if(bestSplitRow == null) {
r.flush(true);
bestSplitRow = r.checkSplit();
}
r.clearSplit();
}
GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder(); GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
builder.setRegionInfo(HRegionInfo.convert(info)); builder.setRegionInfo(HRegionInfo.convert(info));
if (request.hasCompactionState() && request.getCompactionState()) { if (request.hasCompactionState() && request.getCompactionState()) {
@@ -1695,9 +1681,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
builder.setSplittable(region.isSplittable()); builder.setSplittable(region.isSplittable());
builder.setMergeable(region.isMergeable()); builder.setMergeable(region.isMergeable());
builder.setIsRecovering(region.isRecovering()); builder.setIsRecovering(region.isRecovering());
if (request.hasBestSplitRow() && request.getBestSplitRow() && bestSplitRow != null) {
builder.setBestSplitRow(UnsafeByteOperations.unsafeWrap(bestSplitRow));
}
return builder.build(); return builder.build();
} catch (IOException ie) { } catch (IOException ie) {
throw new ServiceException(ie); throw new ServiceException(ie);
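With the best_split_row plumbing stripped out above, getRegionInfo is a pure metadata read again. A hedged sketch of what a caller now gets back (request builder, admin stub, and controller as used in Util earlier in this diff):

GetRegionInfoRequest request =
    RequestConverter.buildGetRegionInfoRequest(hri.getRegionName());
GetRegionInfoResponse response = admin.getRegionInfo(controller, request);
// The response carries the region info plus the splittable, mergeable and
// recovering flags, but no server-chosen split row anymore.
boolean splittable = response.hasSplittable() && response.getSplittable();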


@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.backup;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collection; import java.util.Collection;
import java.util.List; import java.util.List;
@@ -117,15 +116,7 @@ public class TestIncrementalBackup extends TestBackupBase {
byte[] name = regions.get(0).getRegionInfo().getRegionName(); byte[] name = regions.get(0).getRegionInfo().getRegionName();
long startSplitTime = EnvironmentEdgeManager.currentTime(); long startSplitTime = EnvironmentEdgeManager.currentTime();
try { admin.splitRegion(name);
admin.splitRegion(name);
} catch (IOException e) {
//although split fail, this may not affect following check
//In old split without AM2, if region's best split key is not found,
//there are not exception thrown. But in current API, exception
//will be thrown.
LOG.debug("region is not splittable, because " + e);
}
while (!admin.isTableAvailable(table1)) { while (!admin.isTableAvailable(table1)) {
Thread.sleep(100); Thread.sleep(100);
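With the try/catch gone, this test leans on the old fire-and-forget contract: splitRegion returns once the request is dispatched and the split settles in the background. A hedged sketch of the polling idiom with an explicit deadline added (the 30-second bound is illustrative, not from the source):

admin.splitRegion(name);  // returns immediately; the split runs in background
long deadline = EnvironmentEdgeManager.currentTime() + 30000;
while (!admin.isTableAvailable(table1)) {
  if (EnvironmentEdgeManager.currentTime() > deadline) {
    throw new IOException("regions did not come back online in time");
  }
  Thread.sleep(100);
}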


@@ -903,12 +903,10 @@ public class TestAdmin1 {
int[] rowCounts = new int[] { 6000 }; int[] rowCounts = new int[] { 6000 };
int numVersions = HColumnDescriptor.DEFAULT_VERSIONS; int numVersions = HColumnDescriptor.DEFAULT_VERSIONS;
int blockSize = 256; int blockSize = 256;
splitTest(null, familyNames, rowCounts, numVersions, blockSize, true); splitTest(null, familyNames, rowCounts, numVersions, blockSize);
byte[] splitKey = Bytes.toBytes(3500); byte[] splitKey = Bytes.toBytes(3500);
splitTest(splitKey, familyNames, rowCounts, numVersions, blockSize, true); splitTest(splitKey, familyNames, rowCounts, numVersions, blockSize);
// test regionSplitSync
splitTest(splitKey, familyNames, rowCounts, numVersions, blockSize, false);
} }
/** /**
@@ -965,23 +963,23 @@ public class TestAdmin1 {
// one of the column families isn't splittable // one of the column families isn't splittable
int[] rowCounts = new int[] { 6000, 1 }; int[] rowCounts = new int[] { 6000, 1 };
splitTest(null, familyNames, rowCounts, numVersions, blockSize, true); splitTest(null, familyNames, rowCounts, numVersions, blockSize);
rowCounts = new int[] { 1, 6000 }; rowCounts = new int[] { 1, 6000 };
splitTest(null, familyNames, rowCounts, numVersions, blockSize, true); splitTest(null, familyNames, rowCounts, numVersions, blockSize);
// one column family has much smaller data than the other // one column family has much smaller data than the other
// the split key should be based on the largest column family // the split key should be based on the largest column family
rowCounts = new int[] { 6000, 300 }; rowCounts = new int[] { 6000, 300 };
splitTest(null, familyNames, rowCounts, numVersions, blockSize, true); splitTest(null, familyNames, rowCounts, numVersions, blockSize);
rowCounts = new int[] { 300, 6000 }; rowCounts = new int[] { 300, 6000 };
splitTest(null, familyNames, rowCounts, numVersions, blockSize, true); splitTest(null, familyNames, rowCounts, numVersions, blockSize);
} }
void splitTest(byte[] splitPoint, byte[][] familyNames, int[] rowCounts, void splitTest(byte[] splitPoint, byte[][] familyNames, int[] rowCounts,
int numVersions, int blockSize, boolean async) throws Exception { int numVersions, int blockSize) throws Exception {
TableName tableName = TableName.valueOf("testForceSplit"); TableName tableName = TableName.valueOf("testForceSplit");
StringBuilder sb = new StringBuilder(); StringBuilder sb = new StringBuilder();
// Add tail to String so can see better in logs where a test is running. // Add tail to String so can see better in logs where a test is running.
@@ -1035,42 +1033,39 @@ public class TestAdmin1 {
scanner.next(); scanner.next();
// Split the table // Split the table
if (async) { this.admin.split(tableName, splitPoint);
this.admin.split(tableName, splitPoint);
final AtomicInteger count = new AtomicInteger(0); final AtomicInteger count = new AtomicInteger(0);
Thread t = new Thread("CheckForSplit") { Thread t = new Thread("CheckForSplit") {
@Override public void run() { @Override
for (int i = 0; i < 45; i++) { public void run() {
try { for (int i = 0; i < 45; i++) {
sleep(1000); try {
} catch (InterruptedException e) { sleep(1000);
continue; } catch (InterruptedException e) {
} continue;
// check again
List<HRegionLocation> regions = null;
try {
regions = locator.getAllRegionLocations();
} catch (IOException e) {
e.printStackTrace();
}
if (regions == null) continue;
count.set(regions.size());
if (count.get() >= 2) {
LOG.info("Found: " + regions);
break;
}
LOG.debug("Cycle waiting on split");
} }
LOG.debug("CheckForSplit thread exited, current region count: " + count.get()); // check again
List<HRegionLocation> regions = null;
try {
regions = locator.getAllRegionLocations();
} catch (IOException e) {
e.printStackTrace();
}
if (regions == null) continue;
count.set(regions.size());
if (count.get() >= 2) {
LOG.info("Found: " + regions);
break;
}
LOG.debug("Cycle waiting on split");
} }
}; LOG.debug("CheckForSplit thread exited, current region count: " + count.get());
t.setPriority(Thread.NORM_PRIORITY - 2); }
t.start(); };
t.join(); t.setPriority(Thread.NORM_PRIORITY - 2);
} else { t.start();
// Sync split region, no need to create a thread to check t.join();
((HBaseAdmin)admin).splitRegionSync(m.get(0).getRegionInfo().getRegionName(), splitPoint);
}
// Verify row count // Verify row count
rows = 1; // We counted one row above. rows = 1; // We counted one row above.
@@ -1171,23 +1166,12 @@ public class TestAdmin1 {
// regions). Try splitting that region via a different split API (the difference is // regions). Try splitting that region via a different split API (the difference is
// this API goes direct to the regionserver skipping any checks in the admin). Should fail // this API goes direct to the regionserver skipping any checks in the admin). Should fail
try { try {
TEST_UTIL.getHBaseAdmin().splitRegionAsync(regions.get(1).getFirst(), TEST_UTIL.getHBaseAdmin().split(regions.get(1).getSecond(), regions.get(1).getFirst(),
new byte[]{(byte)'1'}); new byte[]{(byte)'1'});
} catch (IOException ex) { } catch (IOException ex) {
gotException = true; gotException = true;
} }
assertTrue(gotException); assertTrue(gotException);
gotException = false;
//testing Sync split operation
try {
TEST_UTIL.getHBaseAdmin().splitRegionSync(regions.get(1).getFirst().getRegionName(),
new byte[]{(byte)'1'});
} catch (IllegalArgumentException ex) {
gotException = true;
}
assertTrue(gotException);
gotException = false; gotException = false;
// Try merging a replica with another. Should fail. // Try merging a replica with another. Should fail.
try { try {


@@ -316,16 +316,7 @@ public class TestTablePermissions {
table.put(new Put(Bytes.toBytes("row2")) table.put(new Put(Bytes.toBytes("row2"))
.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes("v2"))); .addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes("v2")));
Admin admin = UTIL.getAdmin(); Admin admin = UTIL.getAdmin();
try { admin.split(TEST_TABLE);
admin.split(TEST_TABLE);
}
catch (IOException e) {
//although split fail, this may not affect following check
//In old Split API without AM2, if region's best split key is not found,
//there are not exception thrown. But in current API, exception
//will be thrown.
LOG.debug("region is not splittable, because " + e);
}
// wait for split // wait for split
Thread.sleep(10000); Thread.sleep(10000);