HBASE-15128 Disable region splits and merges switch in master

chenheng 2016-02-27 08:36:59 +08:00
parent 793babf4a4
commit 99955a3240
20 changed files with 4391 additions and 767 deletions

View File

@@ -1677,12 +1677,29 @@ public interface Admin extends Abortable, Closeable {
*/
List<SecurityCapability> getSecurityCapabilities() throws IOException;
/**
* Turn the split or merge switches on or off.
*
* @param enabled true to enable the switches, false to disable them
* @param synchronous if true, wait until any outstanding split or merge operations have
* finished before returning
* @param switchTypes the switches to flip; see {@link MasterSwitchType}
* @return array of previous switch values, in the order the switch types were passed
*/
boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean synchronous,
final MasterSwitchType... switchTypes) throws IOException;
/**
* Query the current state of a switch.
*
* @param switchType the switch to query; see {@link MasterSwitchType}
* @return true if the switch is enabled, false otherwise.
*/
boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws IOException;
/**
* Currently, there are only two compact types:
* {@code NORMAL} means do store file compaction;
* {@code MOB} means do MOB file compaction.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public enum CompactType {
@@ -1692,4 +1709,12 @@ public interface Admin extends Abortable, Closeable {
CompactType(int value) {}
}
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum MasterSwitchType {
SPLIT,
MERGE
}
}
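
For reference, a minimal client-side sketch of the new Admin calls; this is illustrative only, and assumes an already-open Connection named conn (not part of this patch):

// Disable both switches without waiting for outstanding operations, then restore them.
try (Admin admin = conn.getAdmin()) {
  boolean[] prev = admin.setSplitOrMergeEnabled(false, false,
      Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
  // prev holds the previous states, in the order the switch types were passed.
  assert !admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT);
  admin.setSplitOrMergeEnabled(prev[0], false, Admin.MasterSwitchType.SPLIT);
  admin.setSplitOrMergeEnabled(prev[1], false, Admin.MasterSwitchType.MERGE);
}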

View File

@@ -1741,6 +1741,20 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
return stub.isBalancerEnabled(controller, request);
}
@Override
public MasterProtos.SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled(
RpcController controller, MasterProtos.SetSplitOrMergeEnabledRequest request)
throws ServiceException {
return stub.setSplitOrMergeEnabled(controller, request);
}
@Override
public MasterProtos.IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled(
RpcController controller, MasterProtos.IsSplitOrMergeEnabledRequest request)
throws ServiceException {
return stub.isSplitOrMergeEnabled(controller, request);
}
@Override
public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController controller,
IsNormalizerEnabledRequest request) throws ServiceException {

View File

@@ -89,6 +89,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescripti
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
@@ -3378,6 +3379,36 @@ public class HBaseAdmin implements Admin {
}
}
@Override
public boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean synchronous,
final MasterSwitchType... switchTypes)
throws IOException {
return executeCallable(new MasterCallable<boolean[]>(getConnection()) {
@Override
public boolean[] call(int callTimeout) throws ServiceException {
MasterProtos.SetSplitOrMergeEnabledResponse response = master.setSplitOrMergeEnabled(null,
RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous, switchTypes));
boolean[] result = new boolean[switchTypes.length];
int i = 0;
for (Boolean prevValue : response.getPrevValueList()) {
result[i++] = prevValue;
}
return result;
}
});
}
@Override
public boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws IOException {
return executeCallable(new MasterCallable<Boolean>(getConnection()) {
@Override
public Boolean call(int callTimeout) throws ServiceException {
return master.isSplitOrMergeEnabled(null,
RequestConverter.buildIsSplitOrMergeEnabledRequest(switchType)).getEnabled();
}
});
}
private HRegionInfo getMobRegionInfo(TableName tableName) {
return new HRegionInfo(tableName, Bytes.toBytes(".mob"),
HConstants.EMPTY_END_ROW, false, 0);

View File

@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Action;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
@@ -76,6 +77,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest;
@@ -95,6 +97,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabled
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
@@ -103,6 +106,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequ
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
@@ -1692,4 +1696,49 @@ public final class RequestConverter {
public static SetNormalizerRunningRequest buildSetNormalizerRunningRequest(boolean on) {
return SetNormalizerRunningRequest.newBuilder().setOn(on).build();
}
/**
* Creates a protocol buffer IsSplitOrMergeEnabledRequest.
*
* @param switchType see {@link org.apache.hadoop.hbase.client.Admin.MasterSwitchType}
* @return an IsSplitOrMergeEnabledRequest
*/
public static IsSplitOrMergeEnabledRequest buildIsSplitOrMergeEnabledRequest(
Admin.MasterSwitchType switchType) {
IsSplitOrMergeEnabledRequest.Builder builder = IsSplitOrMergeEnabledRequest.newBuilder();
builder.setSwitchType(convert(switchType));
return builder.build();
}
/**
* Creates a protocol buffer SetSplitOrMergeEnabledRequest.
*
* @param enabled true to enable the switches, false to disable them
* @param synchronous if true, wait until any outstanding split or merge operations have
* finished before returning
* @param switchTypes the switches to flip; see
* {@link org.apache.hadoop.hbase.client.Admin.MasterSwitchType}
* @return a SetSplitOrMergeEnabledRequest
*/
public static SetSplitOrMergeEnabledRequest buildSetSplitOrMergeEnabledRequest(boolean enabled,
boolean synchronous, Admin.MasterSwitchType... switchTypes) {
SetSplitOrMergeEnabledRequest.Builder builder = SetSplitOrMergeEnabledRequest.newBuilder();
builder.setEnabled(enabled);
builder.setSynchronous(synchronous);
for (Admin.MasterSwitchType switchType : switchTypes) {
builder.addSwitchTypes(convert(switchType));
}
return builder.build();
}
private static MasterProtos.MasterSwitchType convert(Admin.MasterSwitchType switchType) {
switch (switchType) {
case SPLIT:
return MasterProtos.MasterSwitchType.SPLIT;
case MERGE:
return MasterProtos.MasterSwitchType.MERGE;
default:
break;
}
throw new UnsupportedOperationException("Unsupported switch type: " + switchType);
}
}
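
A short sketch of the converter in use; the names are exactly as in this patch, and the argument values are arbitrary examples.

// Build a request that synchronously disables splits, and a query for the merge switch.
SetSplitOrMergeEnabledRequest setRequest =
    RequestConverter.buildSetSplitOrMergeEnabledRequest(false, true, Admin.MasterSwitchType.SPLIT);
IsSplitOrMergeEnabledRequest isRequest =
    RequestConverter.buildIsSplitOrMergeEnabledRequest(Admin.MasterSwitchType.MERGE);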

View File

@@ -115,6 +115,8 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
public String balancerZNode;
// znode containing the state of region normalizer
private String regionNormalizerZNode;
// znode containing the state of all switches; currently there are split and merge child nodes.
private String switchZNode;
// znode containing the lock for the tables
public String tableLockZNode;
// znode containing the state of recovering regions
@@ -430,6 +432,7 @@
conf.get("zookeeper.znode.balancer", "balancer"));
regionNormalizerZNode = ZKUtil.joinZNode(baseZNode,
conf.get("zookeeper.znode.regionNormalizer", "normalizer"));
switchZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.switch", "switch"));
tableLockZNode = ZKUtil.joinZNode(baseZNode,
conf.get("zookeeper.znode.tableLock", "table-lock"));
recoveringRegionsZNode = ZKUtil.joinZNode(baseZNode,
@@ -789,4 +792,11 @@
public String getRegionNormalizerZNode() {
return regionNormalizerZNode;
}
/**
* @return the ZK node for the split and merge switches
*/
public String getSwitchZNode() {
return switchZNode;
}
}
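
Assuming the default base znode of /hbase (zookeeper.znode.parent) and the default child names introduced by SplitOrMergeTracker later in this commit, the switch state therefore ends up under /hbase/switch/split and /hbase/switch/merge.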

View File

@@ -8196,6 +8196,450 @@ public final class ZooKeeperProtos {
// @@protoc_insertion_point(class_scope:hbase.pb.TableLock)
}
public interface SwitchStateOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional bool enabled = 1;
/**
* <code>optional bool enabled = 1;</code>
*/
boolean hasEnabled();
/**
* <code>optional bool enabled = 1;</code>
*/
boolean getEnabled();
}
/**
* Protobuf type {@code hbase.pb.SwitchState}
*
* <pre>
**
* State of the switch.
* </pre>
*/
public static final class SwitchState extends
com.google.protobuf.GeneratedMessage
implements SwitchStateOrBuilder {
// Use SwitchState.newBuilder() to construct.
private SwitchState(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private SwitchState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final SwitchState defaultInstance;
public static SwitchState getDefaultInstance() {
return defaultInstance;
}
public SwitchState getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private SwitchState(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
enabled_ = input.readBool();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.Builder.class);
}
public static com.google.protobuf.Parser<SwitchState> PARSER =
new com.google.protobuf.AbstractParser<SwitchState>() {
public SwitchState parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new SwitchState(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<SwitchState> getParserForType() {
return PARSER;
}
private int bitField0_;
// optional bool enabled = 1;
public static final int ENABLED_FIELD_NUMBER = 1;
private boolean enabled_;
/**
* <code>optional bool enabled = 1;</code>
*/
public boolean hasEnabled() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>optional bool enabled = 1;</code>
*/
public boolean getEnabled() {
return enabled_;
}
private void initFields() {
enabled_ = false;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBool(1, enabled_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(1, enabled_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState) obj;
boolean result = true;
result = result && (hasEnabled() == other.hasEnabled());
if (hasEnabled()) {
result = result && (getEnabled()
== other.getEnabled());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasEnabled()) {
hash = (37 * hash) + ENABLED_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getEnabled());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hbase.pb.SwitchState}
*
* <pre>
**
* State of the switch.
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchStateOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
enabled_ = false;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState build() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.enabled_ = enabled_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.getDefaultInstance()) return this;
if (other.hasEnabled()) {
setEnabled(other.getEnabled());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// optional bool enabled = 1;
private boolean enabled_ ;
/**
* <code>optional bool enabled = 1;</code>
*/
public boolean hasEnabled() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>optional bool enabled = 1;</code>
*/
public boolean getEnabled() {
return enabled_;
}
/**
* <code>optional bool enabled = 1;</code>
*/
public Builder setEnabled(boolean value) {
bitField0_ |= 0x00000001;
enabled_ = value;
onChanged();
return this;
}
/**
* <code>optional bool enabled = 1;</code>
*/
public Builder clearEnabled() {
bitField0_ = (bitField0_ & ~0x00000001);
enabled_ = false;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.SwitchState)
}
static {
defaultInstance = new SwitchState(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.SwitchState)
}
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_MetaRegionServer_descriptor;
private static
@@ -8246,6 +8690,11 @@ public final class ZooKeeperProtos {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_TableLock_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_SwitchState_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_SwitchState_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
@@ -8286,9 +8735,10 @@ public final class ZooKeeperProtos {
"\022\'\n\ntable_name\030\001 \001(\0132\023.hbase.pb.TableNam" +
"e\022(\n\nlock_owner\030\002 \001(\0132\024.hbase.pb.ServerN",
"ame\022\021\n\tthread_id\030\003 \001(\003\022\021\n\tis_shared\030\004 \001(" +
"\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_time\030\006 \001(\003B" +
"E\n*org.apache.hadoop.hbase.protobuf.gene" +
"ratedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
"\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_time\030\006 \001(\003\"" +
"\036\n\013SwitchState\022\017\n\007enabled\030\001 \001(\010BE\n*org.a" +
"pache.hadoop.hbase.protobuf.generatedB\017Z" +
"ooKeeperProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -8355,6 +8805,12 @@ public final class ZooKeeperProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_TableLock_descriptor,
new java.lang.String[] { "TableName", "LockOwner", "ThreadId", "IsShared", "Purpose", "CreateTime", });
internal_static_hbase_pb_SwitchState_descriptor =
getDescriptor().getMessageTypes().get(10);
internal_static_hbase_pb_SwitchState_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_SwitchState_descriptor,
new java.lang.String[] { "Enabled", });
return null;
}
};

View File

@@ -279,6 +279,29 @@ message IsBalancerEnabledResponse {
required bool enabled = 1;
}
enum MasterSwitchType {
SPLIT = 0;
MERGE = 1;
}
message SetSplitOrMergeEnabledRequest {
required bool enabled = 1;
optional bool synchronous = 2;
repeated MasterSwitchType switch_types = 3;
}
message SetSplitOrMergeEnabledResponse {
repeated bool prev_value = 1;
}
message IsSplitOrMergeEnabledRequest {
required MasterSwitchType switch_type = 1;
}
message IsSplitOrMergeEnabledResponse {
required bool enabled = 1;
}
message NormalizeRequest {
}
@@ -632,6 +655,19 @@ service MasterService {
rpc IsBalancerEnabled(IsBalancerEnabledRequest)
returns(IsBalancerEnabledResponse);
/**
* Turn the split or merge switch on or off.
* If synchronous is true, it waits for any outstanding split or merge operation to finish
* before returning.
*/
rpc SetSplitOrMergeEnabled(SetSplitOrMergeEnabledRequest)
returns(SetSplitOrMergeEnabledResponse);
/**
* Query whether the split or merge switch is on/off.
*/
rpc IsSplitOrMergeEnabled(IsSplitOrMergeEnabledRequest)
returns(IsSplitOrMergeEnabledResponse);
/**
* Run region normalizer. Can NOT run for various reasons. Check logs.
*/

View File

@@ -153,3 +153,10 @@ message TableLock {
optional string purpose = 5;
optional int64 create_time = 6;
}
/**
* State of the switch.
*/
message SwitchState {
optional bool enabled = 1;
}

View File

@@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.RegionStateListener;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableState;
@@ -2353,6 +2354,11 @@ public class AssignmentManager {
return hri.getShortNameToLog() + " is not opening on " + serverName;
}
if (!((HMaster)server).getSplitOrMergeTracker().isSplitOrMergeEnabled(
Admin.MasterSwitchType.SPLIT)) {
return "split switch is off!";
}
// Just return in case of retrying
if (current.isSplitting()) {
return null;
@@ -2511,6 +2517,10 @@
return "Merging daughter region already exists, p=" + current;
}
if (!((HMaster)server).getSplitOrMergeTracker().isSplitOrMergeEnabled(
Admin.MasterSwitchType.MERGE)) {
return "merge switch is off!";
}
// Just return in case of retrying
if (current != null) {
return null;

View File

@@ -76,6 +76,7 @@ import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableState;
@@ -155,6 +156,7 @@ import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
import org.apache.hadoop.hbase.zookeeper.SplitOrMergeTracker;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -253,6 +255,9 @@ public class HMaster extends HRegionServer implements MasterServices {
// Tracker for load balancer state
LoadBalancerTracker loadBalancerTracker;
// Tracker for split and merge state
SplitOrMergeTracker splitOrMergeTracker;
// Tracker for region normalizer state
private RegionNormalizerTracker regionNormalizerTracker;
@@ -578,8 +583,13 @@ public class HMaster extends HRegionServer implements MasterServices {
this.normalizer.setMasterServices(this);
this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
this.loadBalancerTracker.start();
this.regionNormalizerTracker = new RegionNormalizerTracker(zooKeeper, this);
this.regionNormalizerTracker.start();
this.splitOrMergeTracker = new SplitOrMergeTracker(zooKeeper, conf, this);
this.splitOrMergeTracker.start();
this.assignmentManager = new AssignmentManager(this, serverManager,
this.balancer, this.service, this.metricsMaster,
this.tableLockManager, tableStateManager);
@@ -2783,6 +2793,20 @@ public class HMaster extends HRegionServer implements MasterServices {
return null == regionNormalizerTracker? false: regionNormalizerTracker.isNormalizerOn();
}
/**
* Queries the state of the {@link SplitOrMergeTracker}. If the tracker is not initialized,
* or if the switch type is unrecognized, false is returned.
* @param switchType see {@link org.apache.hadoop.hbase.client.Admin.MasterSwitchType}
* @return the state of the switch
*/
public boolean isSplitOrMergeEnabled(Admin.MasterSwitchType switchType) {
if (null == splitOrMergeTracker) {
return false;
}
return splitOrMergeTracker.isSplitOrMergeEnabled(switchType);
}
/**
* Fetch the configured {@link LoadBalancer} class name. If none is set, a default is returned.
*
@@ -2799,4 +2823,8 @@ public class HMaster extends HRegionServer implements MasterServices {
public RegionNormalizerTracker getRegionNormalizerTracker() {
return regionNormalizerTracker;
}
public SplitOrMergeTracker getSplitOrMergeTracker() {
return splitOrMergeTracker;
}
}

View File

@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
@@ -1505,6 +1506,35 @@ public class MasterRpcServices extends RSRpcServices
return response.build();
}
@Override
public SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled(RpcController controller,
SetSplitOrMergeEnabledRequest request) throws ServiceException {
SetSplitOrMergeEnabledResponse.Builder response = SetSplitOrMergeEnabledResponse.newBuilder();
try {
master.checkInitialized();
boolean newValue = request.getEnabled();
for (MasterSwitchType masterSwitchType : request.getSwitchTypesList()) {
Admin.MasterSwitchType switchType = convert(masterSwitchType);
boolean oldValue = master.isSplitOrMergeEnabled(switchType);
master.getSplitOrMergeTracker().setSplitOrMergeEnabled(newValue, switchType);
response.addPrevValue(oldValue);
}
} catch (IOException e) {
throw new ServiceException(e);
} catch (KeeperException e) {
throw new ServiceException(e);
}
return response.build();
}
@Override
public IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled(RpcController controller,
IsSplitOrMergeEnabledRequest request) throws ServiceException {
IsSplitOrMergeEnabledResponse.Builder response = IsSplitOrMergeEnabledResponse.newBuilder();
response.setEnabled(master.isSplitOrMergeEnabled(convert(request.getSwitchType())));
return response.build();
}
@Override
public NormalizeResponse normalize(RpcController controller,
NormalizeRequest request) throws ServiceException {
@@ -1574,4 +1604,16 @@ public class MasterRpcServices extends RSRpcServices
}
return response.build();
}
private Admin.MasterSwitchType convert(MasterSwitchType switchType) {
switch (switchType) {
case SPLIT:
return Admin.MasterSwitchType.SPLIT;
case MERGE:
return Admin.MasterSwitchType.MERGE;
default:
break;
}
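// The protobuf enum defines only SPLIT and MERGE, so this fallback should be unreachable;
// null signals an unrecognized switch type to the caller.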
return null;
}
}

View File

@@ -235,6 +235,7 @@ public class HBaseFsck extends Configured implements Closeable {
private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older
private static boolean forceExclusive = false; // only this hbck can modify HBase
private static boolean disableBalancer = false; // disable load balancer to keep regions stable
private static boolean disableSplitAndMerge = false; // disable split and merge
private boolean fixAssignments = false; // fix assignment errors?
private boolean fixMeta = false; // fix meta errors?
private boolean checkHdfs = true; // load and check fs consistency?
@@ -683,6 +684,11 @@
if (shouldDisableBalancer()) {
oldBalancer = admin.setBalancerRunning(false, true);
}
boolean[] oldSplitAndMerge = null;
if (shouldDisableSplitAndMerge()) {
oldSplitAndMerge = admin.setSplitOrMergeEnabled(false, false,
Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
}
try {
onlineConsistencyRepair();
@@ -694,6 +700,19 @@
if (shouldDisableBalancer() && oldBalancer) {
admin.setBalancerRunning(oldBalancer, false);
}
if (shouldDisableSplitAndMerge()) {
if (oldSplitAndMerge != null) {
if (oldSplitAndMerge[0] && oldSplitAndMerge[1]) {
admin.setSplitOrMergeEnabled(true, false,
Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
} else if (oldSplitAndMerge[0]) {
admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.SPLIT);
} else if (oldSplitAndMerge[1]) {
admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.MERGE);
}
}
}
}
if (checkRegionBoundaries) {
@@ -4183,6 +4202,13 @@
disableBalancer = true;
}
/**
* Disable the split and merge switches.
*/
public static void setDisableSplitAndMerge() {
disableSplitAndMerge = true;
}
/**
* The balancer should be disabled if we are modifying HBase.
* It can be disabled if you want to prevent region movement from causing
@@ -4192,6 +4218,15 @@
return fixAny || disableBalancer;
}
/**
* Splits and merges should be disabled if we are modifying HBase.
* They can be disabled if you want to prevent region splits and merges from causing
* false positives.
*/
public boolean shouldDisableSplitAndMerge() {
return fixAny || disableSplitAndMerge;
}
/**
* Set summary mode.
* Print only summary of the tables and status (OK or INCONSISTENT)
@@ -4551,6 +4586,8 @@
setForceExclusive();
} else if (cmd.equals("-disableBalancer")) {
setDisableBalancer();
} else if (cmd.equals("-disableSplitAndMerge")) {
setDisableSplitAndMerge();
} else if (cmd.equals("-timelag")) {
if (i == args.length - 1) {
errors.reportError(ERROR_CODE.WRONG_USAGE, "HBaseFsck: -timelag needs a value.");
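
Operationally, the new flag is passed like the existing -disableBalancer flag; a hypothetical invocation (the other option here is just an example) would be:

$ hbase hbck -disableSplitAndMerge -fixAssignments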

View File

@@ -0,0 +1,151 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.zookeeper;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.zookeeper.KeeperException;
/**
* Tracks the split and merge switch states in ZK.
*/
@InterfaceAudience.Private
public class SplitOrMergeTracker {
private String splitZnode;
private String mergeZnode;
private SwitchStateTracker splitStateTracker;
private SwitchStateTracker mergeStateTracker;
public SplitOrMergeTracker(ZooKeeperWatcher watcher, Configuration conf,
Abortable abortable) {
try {
if (ZKUtil.checkExists(watcher, watcher.getSwitchZNode()) < 0) {
ZKUtil.createAndFailSilent(watcher, watcher.getSwitchZNode());
}
} catch (KeeperException e) {
throw new RuntimeException(e);
}
splitZnode = ZKUtil.joinZNode(watcher.getSwitchZNode(),
conf.get("zookeeper.znode.switch.split", "split"));
mergeZnode = ZKUtil.joinZNode(watcher.getSwitchZNode(),
conf.get("zookeeper.znode.switch.merge", "merge"));
splitStateTracker = new SwitchStateTracker(watcher, splitZnode, abortable);
mergeStateTracker = new SwitchStateTracker(watcher, mergeZnode, abortable);
}
public void start() {
splitStateTracker.start();
mergeStateTracker.start();
}
public boolean isSplitOrMergeEnabled(Admin.MasterSwitchType switchType) {
switch (switchType) {
case SPLIT:
return splitStateTracker.isSwitchEnabled();
case MERGE:
return mergeStateTracker.isSwitchEnabled();
default:
break;
}
return false;
}
public void setSplitOrMergeEnabled(boolean enabled, Admin.MasterSwitchType switchType)
throws KeeperException {
switch (switchType) {
case SPLIT:
splitStateTracker.setSwitchEnabled(enabled);
break;
case MERGE:
mergeStateTracker.setSwitchEnabled(enabled);
break;
default:
break;
}
}
private static class SwitchStateTracker extends ZooKeeperNodeTracker {
public SwitchStateTracker(ZooKeeperWatcher watcher, String node, Abortable abortable) {
super(watcher, node, abortable);
}
/**
* Return true if the switch is on, false otherwise
*/
public boolean isSwitchEnabled() {
byte [] upData = super.getData(false);
try {
// if data in ZK is null, use default of on.
return upData == null || parseFrom(upData).getEnabled();
} catch (DeserializationException dex) {
LOG.error("ZK state for LoadBalancer could not be parsed " + Bytes.toStringBinary(upData));
// return false to be safe.
return false;
}
}
/**
* Set the switch on/off.
* @param enabled true to enable the switch, false to disable it
* @throws KeeperException if updating the znode fails
*/
public void setSwitchEnabled(boolean enabled) throws KeeperException {
byte [] upData = toByteArray(enabled);
try {
ZKUtil.setData(watcher, node, upData);
} catch(KeeperException.NoNodeException nne) {
ZKUtil.createAndWatch(watcher, node, upData);
}
super.nodeDataChanged(node);
}
private byte [] toByteArray(boolean enabled) {
SwitchState.Builder builder = SwitchState.newBuilder();
builder.setEnabled(enabled);
return ProtobufUtil.prependPBMagic(builder.build().toByteArray());
}
private SwitchState parseFrom(byte [] bytes)
throws DeserializationException {
ProtobufUtil.expectPBMagicPrefix(bytes);
SwitchState.Builder builder = SwitchState.newBuilder();
try {
int magicLen = ProtobufUtil.lengthOfPBMagic();
ProtobufUtil.mergeFrom(builder, bytes, magicLen, bytes.length - magicLen);
} catch (IOException e) {
throw new DeserializationException(e);
}
return builder.build();
}
}
}
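
A sketch of how the master wires this tracker up, mirroring the HMaster changes earlier in this commit; zooKeeper, conf, and abortable stand in for whatever the hosting service already has.

// Construct and start the tracker, then flip and query a switch.
SplitOrMergeTracker tracker = new SplitOrMergeTracker(zooKeeper, conf, abortable);
tracker.start();
tracker.setSplitOrMergeEnabled(false, Admin.MasterSwitchType.SPLIT); // may throw KeeperException
boolean splitsOn = tracker.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT); // now false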

View File

@@ -0,0 +1,198 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import java.io.IOException;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@Category({MediumTests.class, ClientTests.class})
public class TestSplitOrMergeStatus {
private static final Log LOG = LogFactory.getLog(TestSplitOrMergeStatus.class);
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static byte [] FAMILY = Bytes.toBytes("testFamily");
/**
* @throws java.lang.Exception
*/
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.startMiniCluster(2);
}
/**
* @throws java.lang.Exception
*/
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
@Test
public void testSplitSwitch() throws Exception {
TableName name = TableName.valueOf("testSplitSwitch");
Table t = TEST_UTIL.createTable(name, FAMILY);
TEST_UTIL.loadTable(t, FAMILY, false);
RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(t.getName());
int originalCount = locator.getAllRegionLocations().size();
Admin admin = TEST_UTIL.getAdmin();
initSwitchStatus(admin);
boolean[] results = admin.setSplitOrMergeEnabled(false, false, Admin.MasterSwitchType.SPLIT);
assertEquals(1, results.length);
assertTrue(results[0]);
admin.split(t.getName());
int count = waitOnSplitOrMerge(t).size();
assertTrue(originalCount == count);
results = admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.SPLIT);
assertEquals(1, results.length);
assertFalse(results[0]);
admin.split(t.getName());
count = waitOnSplitOrMerge(t).size();
assertTrue(originalCount < count);
admin.close();
}
@Test
public void testMergeSwitch() throws Exception {
TableName name = TableName.valueOf("testMergeSwitch");
Table t = TEST_UTIL.createTable(name, FAMILY);
TEST_UTIL.loadTable(t, FAMILY, false);
RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(t.getName());
Admin admin = TEST_UTIL.getAdmin();
initSwitchStatus(admin);
admin.split(t.getName());
waitOnSplitOrMerge(t); // Split the table to ensure we have at least two regions.
waitForMergeable(admin, name);
int originalCount = locator.getAllRegionLocations().size();
boolean[] results = admin.setSplitOrMergeEnabled(false, false, Admin.MasterSwitchType.MERGE);
assertEquals(1, results.length);
assertTrue(results[0]);
List<HRegionInfo> regions = admin.getTableRegions(t.getName());
assertTrue(regions.size() > 1);
admin.mergeRegions(regions.get(0).getEncodedNameAsBytes(),
regions.get(1).getEncodedNameAsBytes(), true);
int count = waitOnSplitOrMerge(t).size();
assertTrue(originalCount == count);
waitForMergeable(admin, name);
results = admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.MERGE);
assertEquals(1, results.length);
assertFalse(results[0]);
admin.mergeRegions(regions.get(0).getEncodedNameAsBytes(),
regions.get(1).getEncodedNameAsBytes(), true);
count = waitOnSplitOrMerge(t).size();
assertTrue(originalCount > count);
admin.close();
}
@Test
public void testMultiSwitches() throws IOException {
Admin admin = TEST_UTIL.getAdmin();
boolean[] switches = admin.setSplitOrMergeEnabled(false, false,
Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
for (boolean s : switches){
assertTrue(s);
}
assertFalse(admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT));
assertFalse(admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE));
admin.close();
}
private void initSwitchStatus(Admin admin) throws IOException {
if (!admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT)) {
admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.SPLIT);
}
if (!admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE)) {
admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.MERGE);
}
assertTrue(admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT));
assertTrue(admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE));
}
private void waitForMergeable(Admin admin, TableName t) throws InterruptedException, IOException {
// Wait for the Regions to be mergeable
MiniHBaseCluster miniCluster = TEST_UTIL.getMiniHBaseCluster();
int mergeable = 0;
while (mergeable < 2) {
Thread.sleep(100);
admin.majorCompact(t);
mergeable = 0;
for (JVMClusterUtil.RegionServerThread regionThread: miniCluster.getRegionServerThreads()) {
for (Region region: regionThread.getRegionServer().getOnlineRegions(t)) {
mergeable += ((HRegion)region).isMergeable() ? 1 : 0;
}
}
}
}
/*
* Wait on table split or merge. May return because we waited long enough on the operation
* and it didn't happen. Caller should check.
* @param t table to watch
* @return List of region locations; caller needs to check whether the table actually
* split or merged.
*/
private List<HRegionLocation> waitOnSplitOrMerge(final Table t)
throws IOException {
try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(t.getName())) {
List<HRegionLocation> regions = locator.getAllRegionLocations();
int originalCount = regions.size();
for (int i = 0; i < TEST_UTIL.getConfiguration().getInt("hbase.test.retries", 10); i++) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
// Restore the interrupt status instead of swallowing it.
Thread.currentThread().interrupt();
}
regions = locator.getAllRegionLocations();
if (regions.size() != originalCount) {
break;
}
}
return regions;
}
}
}

View File

@@ -132,6 +132,38 @@ module Hbase
end
end
#----------------------------------------------------------------------------------------------
# Enable/disable one split or merge switch
# Returns previous switch setting.
def splitormerge_switch(type, enabled)
switch_type = nil
if type == 'SPLIT'
switch_type = org.apache.hadoop.hbase.client.Admin::MasterSwitchType::SPLIT
elsif type == 'MERGE'
switch_type = org.apache.hadoop.hbase.client.Admin::MasterSwitchType::MERGE
else
raise ArgumentError, 'only SPLIT or MERGE accepted for type!'
end
@admin.setSplitOrMergeEnabled(
java.lang.Boolean.valueOf(enabled), java.lang.Boolean.valueOf(false),
switch_type)[0]
end
#----------------------------------------------------------------------------------------------
# Query the current state of the split or merge switch.
# Returns the switch's state (true is enabled).
def splitormerge_enabled(type)
switch_type = nil
if type == 'SPLIT'
switch_type = org.apache.hadoop.hbase.client.Admin::MasterSwitchType::SPLIT
elsif type == 'MERGE'
switch_type = org.apache.hadoop.hbase.client.Admin::MasterSwitchType::MERGE
else
raise ArgumentError, 'only SPLIT or MERGE accepted for type!'
end
@admin.isSplitOrMergeEnabled(switch_type)
end
def locate_region(table_name, row_key)
locator = @connection.getRegionLocator(TableName.valueOf(table_name))
begin

View File

@@ -333,6 +333,8 @@ Shell.load_command_group(
catalogjanitor_enabled
compact_rs
trace
splitormerge_switch
splitormerge_enabled
],
# TODO remove older hlog_roll command
:aliases => {

View File

@@ -0,0 +1,41 @@
#!/usr/bin/env hbase-jruby
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with this
# work for additional information regarding copyright ownership. The ASF
# licenses this file to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Prints the current split or merge status
module Shell
module Commands
# Command to check the split or merge switch status
class SplitormergeEnabled < Command
def help
print <<-EOF
Query the switch's state. Specify the switch type, either 'SPLIT' or 'MERGE'.
Examples:
hbase> splitormerge_enabled 'SPLIT'
EOF
end
def command(switch_type)
format_simple_command do
formatter.row(
[admin.splitormerge_enabled(switch_type) ? 'true' : 'false']
)
end
end
end
end
end

View File

@@ -0,0 +1,43 @@
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Shell
module Commands
# Command to set the split or merge switch
class SplitormergeSwitch < Command
def help
print <<-EOF
Enable/Disable one switch. You can specify the switch type, either 'SPLIT' or 'MERGE'. Returns the previous switch state.
Examples:
hbase> splitormerge_switch 'SPLIT', true
hbase> splitormerge_switch 'SPLIT', false
EOF
end
def command(switch_type, enabled)
format_simple_command do
formatter.row(
[admin.splitormerge_switch(switch_type, enabled) ? 'true' : 'false']
)
end
end
end
end
end