HBASE-6033 Adding some function to check if a table/region is in compaction (Jimmy)
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1342149 13f79535-47bb-0310-9956-ffa450edef68
parent 36bac7b88b
commit c35206b420
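
The patch surfaces the new check through HBaseAdmin.getCompactionState(), which accepts either a table name or a region name and reports NONE, MINOR, MAJOR, or MAJOR_AND_MINOR. As orientation before the diff, here is a minimal client sketch; the table name "mytable" is a placeholder, and the snippet assumes a reachable cluster with an hbase-site.xml on the classpath (it is not part of this commit):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;

public class CompactionStateCheck {
  public static void main(String[] args) throws Exception {
    // Picks up hbase-site.xml from the classpath.
    HBaseAdmin admin = new HBaseAdmin(HBaseConfiguration.create());
    try {
      // For a table, per-region states are merged: one region in a major
      // compaction plus another in a minor one yields MAJOR_AND_MINOR.
      CompactionState state = admin.getCompactionState("mytable");
      System.out.println("Compaction state: " + state);
    } finally {
      admin.close();
    }
  }
}
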
@@ -62,7 +62,9 @@ import org.apache.hadoop.hbase.ipc.HMasterInterface;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.protobuf.ResponseConverter;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
@@ -72,8 +74,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
@@ -1823,6 +1823,98 @@ public class HBaseAdmin implements Abortable, Closeable {
     }
   }

+  /**
+   * Get the current compaction state of a table or region.
+   * It could be in a major compaction, a minor compaction, both, or none.
+   *
+   * @param tableNameOrRegionName table or region to check
+   * @throws IOException if a remote or network exception occurs
+   * @throws InterruptedException
+   * @return the current compaction state
+   */
+  public CompactionState getCompactionState(final String tableNameOrRegionName)
+      throws IOException, InterruptedException {
+    return getCompactionState(Bytes.toBytes(tableNameOrRegionName));
+  }
+
+  /**
+   * Get the current compaction state of a table or region.
+   * It could be in a major compaction, a minor compaction, both, or none.
+   *
+   * @param tableNameOrRegionName table or region to check
+   * @throws IOException if a remote or network exception occurs
+   * @throws InterruptedException
+   * @return the current compaction state
+   */
+  public CompactionState getCompactionState(final byte [] tableNameOrRegionName)
+      throws IOException, InterruptedException {
+    CompactionState state = CompactionState.NONE;
+    CatalogTracker ct = getCatalogTracker();
+    try {
+      if (isRegionName(tableNameOrRegionName, ct)) {
+        Pair<HRegionInfo, ServerName> pair =
+          MetaReader.getRegion(ct, tableNameOrRegionName);
+        if (pair == null || pair.getSecond() == null) {
+          LOG.info("No server in .META. for " +
+            Bytes.toStringBinary(tableNameOrRegionName) + "; pair=" + pair);
+        } else {
+          ServerName sn = pair.getSecond();
+          AdminProtocol admin =
+            this.connection.getAdmin(sn.getHostname(), sn.getPort());
+          GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
+            pair.getFirst().getRegionName(), true);
+          GetRegionInfoResponse response = admin.getRegionInfo(null, request);
+          return response.getCompactionState();
+        }
+      } else {
+        final String tableName = tableNameString(tableNameOrRegionName, ct);
+        List<Pair<HRegionInfo, ServerName>> pairs =
+          MetaReader.getTableRegionsAndLocations(ct, tableName);
+        for (Pair<HRegionInfo, ServerName> pair: pairs) {
+          if (pair.getFirst().isOffline()) continue;
+          if (pair.getSecond() == null) continue;
+          try {
+            ServerName sn = pair.getSecond();
+            AdminProtocol admin =
+              this.connection.getAdmin(sn.getHostname(), sn.getPort());
+            GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
+              pair.getFirst().getRegionName(), true);
+            GetRegionInfoResponse response = admin.getRegionInfo(null, request);
+            switch (response.getCompactionState()) {
+            case MAJOR_AND_MINOR:
+              return CompactionState.MAJOR_AND_MINOR;
+            case MAJOR:
+              if (state == CompactionState.MINOR) {
+                return CompactionState.MAJOR_AND_MINOR;
+              }
+              state = CompactionState.MAJOR;
+              break;
+            case MINOR:
+              if (state == CompactionState.MAJOR) {
+                return CompactionState.MAJOR_AND_MINOR;
+              }
+              state = CompactionState.MINOR;
+              break;
+            case NONE:
+            default: // nothing, continue
+            }
+          } catch (NotServingRegionException e) {
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Trying to get compaction state of " +
+                pair.getFirst() + ": " +
+                StringUtils.stringifyException(e));
+            }
+          }
+        }
+      }
+    } catch (ServiceException se) {
+      throw ProtobufUtil.getRemoteException(se);
+    } finally {
+      cleanupCatalogTracker(ct);
+    }
+    return state;
+  }
+
   /**
    * @see {@link #execute}
    */

@@ -516,20 +516,36 @@ public final class RequestConverter {
 // End utilities for Client
 //Start utilities for Admin

 /**
  * Create a protocol buffer GetRegionInfoRequest for a given region name
  *
  * @param regionName the name of the region to get info
  * @return a protocol buffer GetRegionInfoRequest
  */
 public static GetRegionInfoRequest
 buildGetRegionInfoRequest(final byte[] regionName) {
-  GetRegionInfoRequest.Builder builder = GetRegionInfoRequest.newBuilder();
-  RegionSpecifier region = buildRegionSpecifier(
-    RegionSpecifierType.REGION_NAME, regionName);
-  builder.setRegion(region);
-  return builder.build();
+  return buildGetRegionInfoRequest(regionName, false);
+}
+
+/**
+ * Create a protocol buffer GetRegionInfoRequest for a given region name
+ *
+ * @param regionName the name of the region to get info
+ * @param includeCompactionState indicate if the compaction state is requested
+ * @return a protocol buffer GetRegionInfoRequest
+ */
+public static GetRegionInfoRequest
+buildGetRegionInfoRequest(final byte[] regionName,
+    final boolean includeCompactionState) {
+  GetRegionInfoRequest.Builder builder = GetRegionInfoRequest.newBuilder();
+  RegionSpecifier region = buildRegionSpecifier(
+    RegionSpecifierType.REGION_NAME, regionName);
+  builder.setRegion(region);
+  if (includeCompactionState) {
+    builder.setCompactionState(includeCompactionState);
+  }
+  return builder.build();
 }

 /**
  * Create a protocol buffer GetStoreFileRequest for a given region name
@@ -882,7 +898,6 @@ public final class RequestConverter {
 public static UnassignRegionRequest buildUnassignRegionRequest(
     final byte [] regionName, final boolean force) {
   UnassignRegionRequest.Builder builder = UnassignRegionRequest.newBuilder();
-  RegionSpecifier.Builder rspec = RegionSpecifier.newBuilder();
   builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME,regionName));
   builder.setForce(force);
   return builder.build();

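The one-argument builder above now simply delegates to the new overload with includeCompactionState = false, so existing callers produce the same wire bytes as before; the optional bool is written only when explicitly requested. A small sketch of both call paths (the region name literal is invented for illustration):

import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.util.Bytes;

public class GetRegionInfoRequestDemo {
  public static void main(String[] args) {
    // Hypothetical encoded region name, for illustration only.
    byte[] regionName = Bytes.toBytes("mytable,,1337.abcdef0123456789");

    // Legacy path: compactionState stays unset on the wire.
    GetRegionInfoRequest plain = RequestConverter.buildGetRegionInfoRequest(regionName);
    System.out.println(plain.hasCompactionState());      // false

    // New path: the optional bool is set, asking the server for the state.
    GetRegionInfoRequest withState =
      RequestConverter.buildGetRegionInfoRequest(regionName, true);
    System.out.println(withState.getCompactionState());  // true
  }
}
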
@@ -15,6 +15,10 @@ public final class AdminProtos {
     boolean hasRegion();
     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion();
     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder();
+
+    // optional bool compactionState = 2;
+    boolean hasCompactionState();
+    boolean getCompactionState();
   }
   public static final class GetRegionInfoRequest extends
       com.google.protobuf.GeneratedMessage
@@ -58,8 +62,19 @@ public final class AdminProtos {
       return region_;
     }
+
+    // optional bool compactionState = 2;
+    public static final int COMPACTIONSTATE_FIELD_NUMBER = 2;
+    private boolean compactionState_;
+    public boolean hasCompactionState() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    public boolean getCompactionState() {
+      return compactionState_;
+    }
+
     private void initFields() {
       region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+      compactionState_ = false;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -84,6 +99,9 @@ public final class AdminProtos {
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         output.writeMessage(1, region_);
       }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeBool(2, compactionState_);
+      }
       getUnknownFields().writeTo(output);
     }
@@ -97,6 +115,10 @@ public final class AdminProtos {
       size += com.google.protobuf.CodedOutputStream
         .computeMessageSize(1, region_);
       }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(2, compactionState_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -125,6 +147,11 @@ public final class AdminProtos {
       result = result && getRegion()
         .equals(other.getRegion());
       }
+      result = result && (hasCompactionState() == other.hasCompactionState());
+      if (hasCompactionState()) {
+        result = result && (getCompactionState()
+          == other.getCompactionState());
+      }
       result = result &&
         getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -138,6 +165,10 @@ public final class AdminProtos {
       hash = (37 * hash) + REGION_FIELD_NUMBER;
       hash = (53 * hash) + getRegion().hashCode();
       }
+      if (hasCompactionState()) {
+        hash = (37 * hash) + COMPACTIONSTATE_FIELD_NUMBER;
+        hash = (53 * hash) + hashBoolean(getCompactionState());
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       return hash;
     }
@@ -261,6 +292,8 @@ public final class AdminProtos {
         regionBuilder_.clear();
       }
       bitField0_ = (bitField0_ & ~0x00000001);
+      compactionState_ = false;
+      bitField0_ = (bitField0_ & ~0x00000002);
       return this;
     }
@@ -307,6 +340,10 @@ public final class AdminProtos {
       } else {
         result.region_ = regionBuilder_.build();
       }
+      if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+        to_bitField0_ |= 0x00000002;
+      }
+      result.compactionState_ = compactionState_;
       result.bitField0_ = to_bitField0_;
       onBuilt();
       return result;
@@ -326,6 +363,9 @@ public final class AdminProtos {
       if (other.hasRegion()) {
         mergeRegion(other.getRegion());
       }
+      if (other.hasCompactionState()) {
+        setCompactionState(other.getCompactionState());
+      }
       this.mergeUnknownFields(other.getUnknownFields());
       return this;
     }
@@ -374,6 +414,11 @@ public final class AdminProtos {
             setRegion(subBuilder.buildPartial());
             break;
           }
+          case 16: {
+            bitField0_ |= 0x00000002;
+            compactionState_ = input.readBool();
+            break;
+          }
         }
       }
     }
@@ -470,6 +515,27 @@ public final class AdminProtos {
        return regionBuilder_;
      }
+
+      // optional bool compactionState = 2;
+      private boolean compactionState_ ;
+      public boolean hasCompactionState() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      public boolean getCompactionState() {
+        return compactionState_;
+      }
+      public Builder setCompactionState(boolean value) {
+        bitField0_ |= 0x00000002;
+        compactionState_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearCompactionState() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        compactionState_ = false;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:GetRegionInfoRequest)
     }
@@ -488,6 +554,10 @@ public final class AdminProtos {
     boolean hasRegionInfo();
     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo();
     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder();
+
+    // optional .GetRegionInfoResponse.CompactionState compactionState = 2;
+    boolean hasCompactionState();
+    org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState getCompactionState();
   }
   public static final class GetRegionInfoResponse extends
       com.google.protobuf.GeneratedMessage
@@ -517,6 +587,81 @@ public final class AdminProtos {
       return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_GetRegionInfoResponse_fieldAccessorTable;
     }
+
+    public enum CompactionState
+        implements com.google.protobuf.ProtocolMessageEnum {
+      NONE(0, 0),
+      MINOR(1, 1),
+      MAJOR(2, 2),
+      MAJOR_AND_MINOR(3, 3),
+      ;
+
+      public static final int NONE_VALUE = 0;
+      public static final int MINOR_VALUE = 1;
+      public static final int MAJOR_VALUE = 2;
+      public static final int MAJOR_AND_MINOR_VALUE = 3;
+
+      public final int getNumber() { return value; }
+
+      public static CompactionState valueOf(int value) {
+        switch (value) {
+          case 0: return NONE;
+          case 1: return MINOR;
+          case 2: return MAJOR;
+          case 3: return MAJOR_AND_MINOR;
+          default: return null;
+        }
+      }
+
+      public static com.google.protobuf.Internal.EnumLiteMap<CompactionState>
+          internalGetValueMap() {
+        return internalValueMap;
+      }
+      private static com.google.protobuf.Internal.EnumLiteMap<CompactionState>
+          internalValueMap =
+            new com.google.protobuf.Internal.EnumLiteMap<CompactionState>() {
+              public CompactionState findValueByNumber(int number) {
+                return CompactionState.valueOf(number);
+              }
+            };
+
+      public final com.google.protobuf.Descriptors.EnumValueDescriptor
+          getValueDescriptor() {
+        return getDescriptor().getValues().get(index);
+      }
+      public final com.google.protobuf.Descriptors.EnumDescriptor
+          getDescriptorForType() {
+        return getDescriptor();
+      }
+      public static final com.google.protobuf.Descriptors.EnumDescriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.getDescriptor().getEnumTypes().get(0);
+      }
+
+      private static final CompactionState[] VALUES = {
+        NONE, MINOR, MAJOR, MAJOR_AND_MINOR,
+      };
+
+      public static CompactionState valueOf(
+          com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+        if (desc.getType() != getDescriptor()) {
+          throw new java.lang.IllegalArgumentException(
+            "EnumValueDescriptor is not for this type.");
+        }
+        return VALUES[desc.getIndex()];
+      }
+
+      private final int index;
+      private final int value;
+
+      private CompactionState(int index, int value) {
+        this.index = index;
+        this.value = value;
+      }
+
+      // @@protoc_insertion_point(enum_scope:GetRegionInfoResponse.CompactionState)
+    }
+
     private int bitField0_;
     // required .RegionInfo regionInfo = 1;
     public static final int REGIONINFO_FIELD_NUMBER = 1;

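The generated enum carries the usual protobuf number-to-constant mapping, and the valueOf(int) above returns null for numbers the client does not know (the parser then shunts such values into unknown fields). A tiny illustrative round trip:

import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;

public class CompactionStateEnumDemo {
  public static void main(String[] args) {
    CompactionState both = CompactionState.valueOf(3);
    System.out.println(both);                         // MAJOR_AND_MINOR
    System.out.println(both.getNumber());             // 3
    System.out.println(CompactionState.valueOf(42));  // null: unknown wire number
  }
}
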
@@ -531,8 +676,19 @@ public final class AdminProtos {
       return regionInfo_;
     }
+
+    // optional .GetRegionInfoResponse.CompactionState compactionState = 2;
+    public static final int COMPACTIONSTATE_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState compactionState_;
+    public boolean hasCompactionState() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState getCompactionState() {
+      return compactionState_;
+    }
+
     private void initFields() {
       regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
+      compactionState_ = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -557,6 +713,9 @@ public final class AdminProtos {
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         output.writeMessage(1, regionInfo_);
       }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeEnum(2, compactionState_.getNumber());
+      }
       getUnknownFields().writeTo(output);
     }
@@ -570,6 +729,10 @@ public final class AdminProtos {
       size += com.google.protobuf.CodedOutputStream
         .computeMessageSize(1, regionInfo_);
       }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeEnumSize(2, compactionState_.getNumber());
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -598,6 +761,11 @@ public final class AdminProtos {
       result = result && getRegionInfo()
         .equals(other.getRegionInfo());
       }
+      result = result && (hasCompactionState() == other.hasCompactionState());
+      if (hasCompactionState()) {
+        result = result &&
+          (getCompactionState() == other.getCompactionState());
+      }
       result = result &&
         getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -611,6 +779,10 @@ public final class AdminProtos {
       hash = (37 * hash) + REGIONINFO_FIELD_NUMBER;
       hash = (53 * hash) + getRegionInfo().hashCode();
       }
+      if (hasCompactionState()) {
+        hash = (37 * hash) + COMPACTIONSTATE_FIELD_NUMBER;
+        hash = (53 * hash) + hashEnum(getCompactionState());
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       return hash;
     }
@@ -734,6 +906,8 @@ public final class AdminProtos {
         regionInfoBuilder_.clear();
       }
       bitField0_ = (bitField0_ & ~0x00000001);
+      compactionState_ = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
+      bitField0_ = (bitField0_ & ~0x00000002);
       return this;
     }
@@ -780,6 +954,10 @@ public final class AdminProtos {
       } else {
         result.regionInfo_ = regionInfoBuilder_.build();
       }
+      if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+        to_bitField0_ |= 0x00000002;
+      }
+      result.compactionState_ = compactionState_;
       result.bitField0_ = to_bitField0_;
       onBuilt();
       return result;
@@ -799,6 +977,9 @@ public final class AdminProtos {
       if (other.hasRegionInfo()) {
         mergeRegionInfo(other.getRegionInfo());
       }
+      if (other.hasCompactionState()) {
+        setCompactionState(other.getCompactionState());
+      }
       this.mergeUnknownFields(other.getUnknownFields());
       return this;
     }
@@ -847,6 +1028,17 @@ public final class AdminProtos {
             setRegionInfo(subBuilder.buildPartial());
             break;
           }
+          case 16: {
+            int rawValue = input.readEnum();
+            org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState value = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState.valueOf(rawValue);
+            if (value == null) {
+              unknownFields.mergeVarintField(2, rawValue);
+            } else {
+              bitField0_ |= 0x00000002;
+              compactionState_ = value;
+            }
+            break;
+          }
         }
       }
     }
@@ -943,6 +1135,30 @@ public final class AdminProtos {
        return regionInfoBuilder_;
      }
+
+      // optional .GetRegionInfoResponse.CompactionState compactionState = 2;
+      private org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState compactionState_ = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
+      public boolean hasCompactionState() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState getCompactionState() {
+        return compactionState_;
+      }
+      public Builder setCompactionState(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000002;
+        compactionState_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearCompactionState() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        compactionState_ = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:GetRegionInfoResponse)
     }
@@ -15661,77 +15877,81 @@ public final class AdminProtos {
       descriptor;
   static {
     java.lang.String[] descriptorData = {
-      "\n\013Admin.proto\032\013hbase.proto\"8\n\024GetRegionI" +
-      "nfoRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpeci" +
-      "fier\"8\n\025GetRegionInfoResponse\022\037\n\nregionI" +
-      "nfo\030\001 \002(\0132\013.RegionInfo\"G\n\023GetStoreFileRe" +
-      "quest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022" +
-      "\016\n\006family\030\002 \003(\014\")\n\024GetStoreFileResponse\022" +
-      "\021\n\tstoreFile\030\001 \003(\t\"\030\n\026GetOnlineRegionReq" +
-      "uest\":\n\027GetOnlineRegionResponse\022\037\n\nregio" +
-      "nInfo\030\001 \003(\0132\013.RegionInfo\"N\n\021OpenRegionRe" +
-      "quest\022\033\n\006region\030\001 \003(\0132\013.RegionInfo\022\034\n\024ve",
-      "rsionOfOfflineNode\030\002 \001(\r\"\234\001\n\022OpenRegionR" +
-      "esponse\022<\n\014openingState\030\001 \003(\0162&.OpenRegi" +
-      "onResponse.RegionOpeningState\"H\n\022RegionO" +
-      "peningState\022\n\n\006OPENED\020\000\022\022\n\016ALREADY_OPENE" +
-      "D\020\001\022\022\n\016FAILED_OPENING\020\002\"\232\001\n\022CloseRegionR" +
-      "equest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier" +
-      "\022\034\n\024versionOfClosingNode\030\002 \001(\r\022\034\n\016transi" +
-      "tionInZK\030\003 \001(\010:\004true\022&\n\021destinationServe" +
-      "r\030\004 \001(\0132\013.ServerName\"%\n\023CloseRegionRespo" +
-      "nse\022\016\n\006closed\030\001 \002(\010\"M\n\022FlushRegionReques",
-      "t\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\025\n\ri" +
-      "fOlderThanTs\030\002 \001(\004\"=\n\023FlushRegionRespons" +
-      "e\022\025\n\rlastFlushTime\030\001 \002(\004\022\017\n\007flushed\030\002 \001(" +
-      "\010\"J\n\022SplitRegionRequest\022 \n\006region\030\001 \002(\0132" +
-      "\020.RegionSpecifier\022\022\n\nsplitPoint\030\002 \001(\014\"\025\n" +
-      "\023SplitRegionResponse\"G\n\024CompactRegionReq" +
-      "uest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\r" +
-      "\n\005major\030\002 \001(\010\"\027\n\025CompactRegionResponse\"1" +
-      "\n\004UUID\022\024\n\014leastSigBits\030\001 \002(\004\022\023\n\013mostSigB" +
-      "its\030\002 \002(\004\"\270\003\n\010WALEntry\022\035\n\003key\030\001 \002(\0132\020.WA",
-      "LEntry.WALKey\022\037\n\004edit\030\002 \002(\0132\021.WALEntry.W" +
-      "ALEdit\032~\n\006WALKey\022\031\n\021encodedRegionName\030\001 " +
-      "\002(\014\022\021\n\ttableName\030\002 \002(\014\022\031\n\021logSequenceNum" +
-      "ber\030\003 \002(\004\022\021\n\twriteTime\030\004 \002(\004\022\030\n\tclusterI" +
-      "d\030\005 \001(\0132\005.UUID\032\353\001\n\007WALEdit\022\025\n\rkeyValueBy" +
-      "tes\030\001 \003(\014\0222\n\013familyScope\030\002 \003(\0132\035.WALEntr" +
-      "y.WALEdit.FamilyScope\032M\n\013FamilyScope\022\016\n\006" +
-      "family\030\001 \002(\014\022.\n\tscopeType\030\002 \002(\0162\033.WALEnt" +
-      "ry.WALEdit.ScopeType\"F\n\tScopeType\022\033\n\027REP" +
-      "LICATION_SCOPE_LOCAL\020\000\022\034\n\030REPLICATION_SC",
-      "OPE_GLOBAL\020\001\"4\n\030ReplicateWALEntryRequest" +
-      "\022\030\n\005entry\030\001 \003(\0132\t.WALEntry\"\033\n\031ReplicateW" +
-      "ALEntryResponse\"\026\n\024RollWALWriterRequest\"" +
-      ".\n\025RollWALWriterResponse\022\025\n\rregionToFlus" +
-      "h\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006reason\030\001" +
-      " \002(\t\"\024\n\022StopServerResponse\"\026\n\024GetServerI" +
-      "nfoRequest\"@\n\nServerInfo\022\037\n\nserverName\030\001" +
-      " \002(\0132\013.ServerName\022\021\n\twebuiPort\030\002 \001(\r\"8\n\025" +
-      "GetServerInfoResponse\022\037\n\nserverInfo\030\001 \002(" +
-      "\0132\013.ServerInfo2\371\005\n\014AdminService\022>\n\rgetRe",
-      "gionInfo\022\025.GetRegionInfoRequest\032\026.GetReg" +
-      "ionInfoResponse\022;\n\014getStoreFile\022\024.GetSto" +
-      "reFileRequest\032\025.GetStoreFileResponse\022D\n\017" +
-      "getOnlineRegion\022\027.GetOnlineRegionRequest" +
-      "\032\030.GetOnlineRegionResponse\0225\n\nopenRegion" +
-      "\022\022.OpenRegionRequest\032\023.OpenRegionRespons" +
-      "e\0228\n\013closeRegion\022\023.CloseRegionRequest\032\024." +
-      "CloseRegionResponse\0228\n\013flushRegion\022\023.Flu" +
-      "shRegionRequest\032\024.FlushRegionResponse\0228\n" +
-      "\013splitRegion\022\023.SplitRegionRequest\032\024.Spli",
-      "tRegionResponse\022>\n\rcompactRegion\022\025.Compa" +
-      "ctRegionRequest\032\026.CompactRegionResponse\022" +
-      "J\n\021replicateWALEntry\022\031.ReplicateWALEntry" +
-      "Request\032\032.ReplicateWALEntryResponse\022>\n\rr" +
-      "ollWALWriter\022\025.RollWALWriterRequest\032\026.Ro" +
-      "llWALWriterResponse\022>\n\rgetServerInfo\022\025.G" +
-      "etServerInfoRequest\032\026.GetServerInfoRespo" +
-      "nse\0225\n\nstopServer\022\022.StopServerRequest\032\023." +
-      "StopServerResponseBA\n*org.apache.hadoop." +
-      "hbase.protobuf.generatedB\013AdminProtosH\001\210",
-      "\001\001\240\001\001"
+      "\n\013Admin.proto\032\013hbase.proto\"Q\n\024GetRegionI" +
+      "nfoRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpeci" +
+      "fier\022\027\n\017compactionState\030\002 \001(\010\"\301\001\n\025GetReg" +
+      "ionInfoResponse\022\037\n\nregionInfo\030\001 \002(\0132\013.Re" +
+      "gionInfo\022?\n\017compactionState\030\002 \001(\0162&.GetR" +
+      "egionInfoResponse.CompactionState\"F\n\017Com" +
+      "pactionState\022\010\n\004NONE\020\000\022\t\n\005MINOR\020\001\022\t\n\005MAJ" +
+      "OR\020\002\022\023\n\017MAJOR_AND_MINOR\020\003\"G\n\023GetStoreFil" +
+      "eRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifi" +
+      "er\022\016\n\006family\030\002 \003(\014\")\n\024GetStoreFileRespon",
+      "se\022\021\n\tstoreFile\030\001 \003(\t\"\030\n\026GetOnlineRegion" +
+      "Request\":\n\027GetOnlineRegionResponse\022\037\n\nre" +
+      "gionInfo\030\001 \003(\0132\013.RegionInfo\"N\n\021OpenRegio" +
+      "nRequest\022\033\n\006region\030\001 \003(\0132\013.RegionInfo\022\034\n" +
+      "\024versionOfOfflineNode\030\002 \001(\r\"\234\001\n\022OpenRegi" +
+      "onResponse\022<\n\014openingState\030\001 \003(\0162&.OpenR" +
+      "egionResponse.RegionOpeningState\"H\n\022Regi" +
+      "onOpeningState\022\n\n\006OPENED\020\000\022\022\n\016ALREADY_OP" +
+      "ENED\020\001\022\022\n\016FAILED_OPENING\020\002\"\232\001\n\022CloseRegi" +
+      "onRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecif",
+      "ier\022\034\n\024versionOfClosingNode\030\002 \001(\r\022\034\n\016tra" +
+      "nsitionInZK\030\003 \001(\010:\004true\022&\n\021destinationSe" +
+      "rver\030\004 \001(\0132\013.ServerName\"%\n\023CloseRegionRe" +
+      "sponse\022\016\n\006closed\030\001 \002(\010\"M\n\022FlushRegionReq" +
+      "uest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\025" +
+      "\n\rifOlderThanTs\030\002 \001(\004\"=\n\023FlushRegionResp" +
+      "onse\022\025\n\rlastFlushTime\030\001 \002(\004\022\017\n\007flushed\030\002" +
+      " \001(\010\"J\n\022SplitRegionRequest\022 \n\006region\030\001 \002" +
+      "(\0132\020.RegionSpecifier\022\022\n\nsplitPoint\030\002 \001(\014" +
+      "\"\025\n\023SplitRegionResponse\"G\n\024CompactRegion",
+      "Request\022 \n\006region\030\001 \002(\0132\020.RegionSpecifie" +
+      "r\022\r\n\005major\030\002 \001(\010\"\027\n\025CompactRegionRespons" +
+      "e\"1\n\004UUID\022\024\n\014leastSigBits\030\001 \002(\004\022\023\n\013mostS" +
+      "igBits\030\002 \002(\004\"\270\003\n\010WALEntry\022\035\n\003key\030\001 \002(\0132\020" +
+      ".WALEntry.WALKey\022\037\n\004edit\030\002 \002(\0132\021.WALEntr" +
+      "y.WALEdit\032~\n\006WALKey\022\031\n\021encodedRegionName" +
+      "\030\001 \002(\014\022\021\n\ttableName\030\002 \002(\014\022\031\n\021logSequence" +
+      "Number\030\003 \002(\004\022\021\n\twriteTime\030\004 \002(\004\022\030\n\tclust" +
+      "erId\030\005 \001(\0132\005.UUID\032\353\001\n\007WALEdit\022\025\n\rkeyValu" +
+      "eBytes\030\001 \003(\014\0222\n\013familyScope\030\002 \003(\0132\035.WALE",
+      "ntry.WALEdit.FamilyScope\032M\n\013FamilyScope\022" +
+      "\016\n\006family\030\001 \002(\014\022.\n\tscopeType\030\002 \002(\0162\033.WAL" +
+      "Entry.WALEdit.ScopeType\"F\n\tScopeType\022\033\n\027" +
+      "REPLICATION_SCOPE_LOCAL\020\000\022\034\n\030REPLICATION" +
+      "_SCOPE_GLOBAL\020\001\"4\n\030ReplicateWALEntryRequ" +
+      "est\022\030\n\005entry\030\001 \003(\0132\t.WALEntry\"\033\n\031Replica" +
+      "teWALEntryResponse\"\026\n\024RollWALWriterReque" +
+      "st\".\n\025RollWALWriterResponse\022\025\n\rregionToF" +
+      "lush\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006reaso" +
+      "n\030\001 \002(\t\"\024\n\022StopServerResponse\"\026\n\024GetServ",
+      "erInfoRequest\"@\n\nServerInfo\022\037\n\nserverNam" +
+      "e\030\001 \002(\0132\013.ServerName\022\021\n\twebuiPort\030\002 \001(\r\"" +
+      "8\n\025GetServerInfoResponse\022\037\n\nserverInfo\030\001" +
+      " \002(\0132\013.ServerInfo2\371\005\n\014AdminService\022>\n\rge" +
+      "tRegionInfo\022\025.GetRegionInfoRequest\032\026.Get" +
+      "RegionInfoResponse\022;\n\014getStoreFile\022\024.Get" +
+      "StoreFileRequest\032\025.GetStoreFileResponse\022" +
+      "D\n\017getOnlineRegion\022\027.GetOnlineRegionRequ" +
+      "est\032\030.GetOnlineRegionResponse\0225\n\nopenReg" +
+      "ion\022\022.OpenRegionRequest\032\023.OpenRegionResp",
+      "onse\0228\n\013closeRegion\022\023.CloseRegionRequest" +
+      "\032\024.CloseRegionResponse\0228\n\013flushRegion\022\023." +
+      "FlushRegionRequest\032\024.FlushRegionResponse" +
+      "\0228\n\013splitRegion\022\023.SplitRegionRequest\032\024.S" +
+      "plitRegionResponse\022>\n\rcompactRegion\022\025.Co" +
+      "mpactRegionRequest\032\026.CompactRegionRespon" +
+      "se\022J\n\021replicateWALEntry\022\031.ReplicateWALEn" +
+      "tryRequest\032\032.ReplicateWALEntryResponse\022>" +
+      "\n\rrollWALWriter\022\025.RollWALWriterRequest\032\026" +
+      ".RollWALWriterResponse\022>\n\rgetServerInfo\022",
+      "\025.GetServerInfoRequest\032\026.GetServerInfoRe" +
+      "sponse\0225\n\nstopServer\022\022.StopServerRequest" +
+      "\032\023.StopServerResponseBA\n*org.apache.hado" +
+      "op.hbase.protobuf.generatedB\013AdminProtos" +
+      "H\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -15743,7 +15963,7 @@ public final class AdminProtos {
           internal_static_GetRegionInfoRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_GetRegionInfoRequest_descriptor,
-              new java.lang.String[] { "Region", },
+              new java.lang.String[] { "Region", "CompactionState", },
              org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest.class,
              org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest.Builder.class);
           internal_static_GetRegionInfoResponse_descriptor =
@@ -15751,7 +15971,7 @@ public final class AdminProtos {
           internal_static_GetRegionInfoResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_GetRegionInfoResponse_descriptor,
-              new java.lang.String[] { "RegionInfo", },
+              new java.lang.String[] { "RegionInfo", "CompactionState", },
              org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.class,
              org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.Builder.class);
           internal_static_GetStoreFileRequest_descriptor =

@@ -30,8 +30,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;

 import com.google.common.base.Preconditions;

@@ -169,6 +169,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.handler.CloseMetaHandler;
 import org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler;
 import org.apache.hadoop.hbase.regionserver.handler.CloseRootHandler;
@@ -3288,6 +3289,10 @@ public class HRegionServer implements ClientProtocol,
       HRegionInfo info = region.getRegionInfo();
       GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
       builder.setRegionInfo(HRegionInfo.convert(info));
+      if (request.hasCompactionState() && request.getCompactionState()) {
+        builder.setCompactionState(
+          CompactionRequest.getCompactionState(info.getRegionId()));
+      }
       return builder.build();
     } catch (IOException ie) {
       throw new ServiceException(ie);

@@ -1315,10 +1315,14 @@ public class Store extends SchemaConfigured implements HeapSize {
     } finally {
       this.lock.readLock().unlock();
     }
+    if (ret != null) {
+      CompactionRequest.preRequest(ret);
+    }
     return ret;
   }

   public void finishRequest(CompactionRequest cr) {
+    CompactionRequest.postRequest(cr);
     cr.finishRequest();
     synchronized (filesCompacting) {
       filesCompacting.removeAll(cr.getFiles());

@@ -21,13 +21,16 @@ package org.apache.hadoop.hbase.regionserver.compactions;

 import java.io.IOException;
 import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.RejectedExecutionHandler;
 import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.atomic.AtomicInteger;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Store;
@@ -57,6 +60,14 @@ public class CompactionRequest implements Comparable<CompactionRequest>,
     private final Long timeInNanos;
     private HRegionServer server = null;

+    /**
+     * Map to track the number of compactions requested per region (id)
+     */
+    private static final ConcurrentHashMap<Long, AtomicInteger>
+      majorCompactions = new ConcurrentHashMap<Long, AtomicInteger>();
+    private static final ConcurrentHashMap<Long, AtomicInteger>
+      minorCompactions = new ConcurrentHashMap<Long, AtomicInteger>();
+
     public CompactionRequest(HRegion r, Store s,
         CompactSelection files, boolean isMajor, int p) {
       Preconditions.checkNotNull(r);
@@ -75,6 +86,58 @@ public class CompactionRequest implements Comparable<CompactionRequest>,
       this.timeInNanos = System.nanoTime();
     }

+    /**
+     * Find out if a given region is in compaction now.
+     *
+     * @param regionId
+     * @return the current compaction state of the region
+     */
+    public static CompactionState getCompactionState(
+        final long regionId) {
+      Long key = Long.valueOf(regionId);
+      AtomicInteger major = majorCompactions.get(key);
+      AtomicInteger minor = minorCompactions.get(key);
+      int state = 0;
+      if (minor != null && minor.get() > 0) {
+        state += 1;  // use 1 to indicate minor here
+      }
+      if (major != null && major.get() > 0) {
+        state += 2;  // use 2 to indicate major here
+      }
+      switch (state) {
+      case 3:  // 3 = 2 + 1, so both major and minor
+        return CompactionState.MAJOR_AND_MINOR;
+      case 2:
+        return CompactionState.MAJOR;
+      case 1:
+        return CompactionState.MINOR;
+      default:
+        return CompactionState.NONE;
+      }
+    }
+
+    public static void preRequest(final CompactionRequest cr){
+      Long key = Long.valueOf(cr.getHRegion().getRegionId());
+      ConcurrentHashMap<Long, AtomicInteger> compactions =
+        cr.isMajor() ? majorCompactions : minorCompactions;
+      AtomicInteger count = compactions.get(key);
+      if (count == null) {
+        compactions.putIfAbsent(key, new AtomicInteger(0));
+        count = compactions.get(key);
+      }
+      count.incrementAndGet();
+    }
+
+    public static void postRequest(final CompactionRequest cr){
+      Long key = Long.valueOf(cr.getHRegion().getRegionId());
+      ConcurrentHashMap<Long, AtomicInteger> compactions =
+        cr.isMajor() ? majorCompactions : minorCompactions;
+      AtomicInteger count = compactions.get(key);
+      if (count != null) {
+        count.decrementAndGet();
+      }
+    }
+
     public void finishRequest() {
       this.compactSelection.finishRequest();
     }

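The scheme above keeps two static ConcurrentHashMap counters keyed by region id: Store calls preRequest when a compaction is queued and postRequest when it completes, and getCompactionState folds the two counters into a two-bit value (minor = 1, major = 2, both = 3). A self-contained sketch of the same bookkeeping, outside the real class (which needs a live HRegion and Store):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

// Standalone illustration of the patch's counting scheme; names are ours.
public class CompactionCounters {
  private static final ConcurrentHashMap<Long, AtomicInteger> major =
    new ConcurrentHashMap<Long, AtomicInteger>();
  private static final ConcurrentHashMap<Long, AtomicInteger> minor =
    new ConcurrentHashMap<Long, AtomicInteger>();

  static void start(long regionId, boolean isMajor) {
    ConcurrentHashMap<Long, AtomicInteger> m = isMajor ? major : minor;
    m.putIfAbsent(regionId, new AtomicInteger(0));  // racing starters share one counter
    m.get(regionId).incrementAndGet();
  }

  static void finish(long regionId, boolean isMajor) {
    AtomicInteger c = (isMajor ? major : minor).get(regionId);
    if (c != null) c.decrementAndGet();  // entries are never removed, as in the patch
  }

  static String state(long regionId) {
    AtomicInteger ma = major.get(regionId), mi = minor.get(regionId);
    int s = (mi != null && mi.get() > 0 ? 1 : 0) + (ma != null && ma.get() > 0 ? 2 : 0);
    return new String[] {"NONE", "MINOR", "MAJOR", "MAJOR_AND_MINOR"}[s];
  }

  public static void main(String[] args) {
    start(42L, true);
    start(42L, false);
    System.out.println(state(42L));  // MAJOR_AND_MINOR
    finish(42L, true);
    System.out.println(state(42L));  // MINOR
  }
}
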
@@ -28,10 +28,19 @@ import "hbase.proto";

 message GetRegionInfoRequest {
   required RegionSpecifier region = 1;
+  optional bool compactionState = 2;
 }

 message GetRegionInfoResponse {
   required RegionInfo regionInfo = 1;
+  optional CompactionState compactionState = 2;
+
+  enum CompactionState {
+    NONE = 0;
+    MINOR = 1;
+    MAJOR = 2;
+    MAJOR_AND_MINOR = 3;
+  }
 }

 /**

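Because both new fields are optional, old and new clients and servers interoperate: a request that omits the bool gets a response with the state unset, and readers fall back to the declared enum default of NONE. A small sketch against the generated classes (buildPartial() is used so the required regionInfo field can be omitted in this illustration):

import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;

public class OptionalFieldDemo {
  public static void main(String[] args) {
    // A response built without the optional field, as an old server would send it.
    GetRegionInfoResponse response = GetRegionInfoResponse.newBuilder().buildPartial();
    System.out.println(response.hasCompactionState());                       // false
    System.out.println(response.getCompactionState() == CompactionState.NONE); // true
  }
}
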
@@ -151,6 +151,11 @@
     <td><%= hbadmin.isTableEnabled(table.getTableName()) %></td>
     <td>Is the table enabled</td>
   </tr>
+  <tr>
+    <td>Compaction</td>
+    <td><%= hbadmin.getCompactionState(table.getTableName()) %></td>
+    <td>Is the table compacting</td>
+  </tr>
   <% if (showFragmentation) { %>
   <tr>
     <td>Fragmentation</td>

@@ -0,0 +1,171 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.TestRegionSplitCalculator;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/** Unit tests to test retrieving table/region compaction state */
+@Category(MediumTests.class)
+public class TestCompactionState {
+  final static Log LOG = LogFactory.getLog(TestCompactionState.class);
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private final static Random random = new Random();
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.startMiniCluster();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test(timeout=60000)
+  public void testMajorCompaction() throws IOException, InterruptedException {
+    compaction("testMajorCompaction", 8, CompactionState.MAJOR);
+  }
+
+  @Test(timeout=60000)
+  public void testMinorCompaction() throws IOException, InterruptedException {
+    compaction("testMinorCompaction", 15, CompactionState.MINOR);
+  }
+
+  /**
+   * Load data to a table, flush it to disk, trigger compaction,
+   * confirm the compaction state is right and wait till it is done.
+   *
+   * @param tableName
+   * @param flushes
+   * @param expectedState
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  private void compaction(final String tableName, final int flushes,
+      final CompactionState expectedState) throws IOException, InterruptedException {
+    // Create a table with regions
+    byte [] table = Bytes.toBytes(tableName);
+    byte [] family = Bytes.toBytes("family");
+    HTable ht = null;
+    try {
+      ht = TEST_UTIL.createTable(table, family);
+      loadData(ht, family, 3000, flushes);
+      HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
+      List<HRegion> regions = rs.getOnlineRegions(table);
+      int countBefore = countStoreFiles(regions, family);
+      assertTrue(countBefore > 0); // there should be some data files
+      HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
+      if (expectedState == CompactionState.MINOR) {
+        admin.compact(tableName);
+      } else {
+        admin.majorCompact(table);
+      }
+      long curt = System.currentTimeMillis();
+      long waitTime = 5000;
+      long endt = curt + waitTime;
+      CompactionState state = admin.getCompactionState(table);
+      while (state == CompactionState.NONE && curt < endt) {
+        Thread.sleep(10);
+        state = admin.getCompactionState(table);
+        curt = System.currentTimeMillis();
+      }
+      // Now, should have the right compaction state,
+      // otherwise, the compaction should have already been done
+      if (expectedState != state) {
+        for (HRegion region: regions) {
+          state = CompactionRequest.getCompactionState(region.getRegionId());
+          assertEquals(CompactionState.NONE, state);
+        }
+      } else {
+        curt = System.currentTimeMillis();
+        waitTime = 20000;
+        endt = curt + waitTime;
+        state = admin.getCompactionState(table);
+        while (state != CompactionState.NONE && curt < endt) {
+          Thread.sleep(10);
+          state = admin.getCompactionState(table);
+          curt = System.currentTimeMillis();
+        }
+        // Now, compaction should be done.
+        assertEquals(CompactionState.NONE, state);
+      }
+      int countAfter = countStoreFiles(regions, family);
+      assertTrue(countAfter < countBefore);
+      if (expectedState == CompactionState.MAJOR) assertTrue(1 == countAfter);
+      else assertTrue(1 < countAfter);
+    } finally {
+      if (ht != null) {
+        TEST_UTIL.deleteTable(table);
+      }
+    }
+  }
+
+  private static int countStoreFiles(
+      List<HRegion> regions, final byte[] family) {
+    int count = 0;
+    for (HRegion region: regions) {
+      count += region.getStoreFileList(new byte[][]{family}).size();
+    }
+    return count;
+  }
+
+  private static void loadData(final HTable ht, final byte[] family,
+      final int rows, final int flushes) throws IOException {
+    List<Put> puts = new ArrayList<Put>(rows);
+    byte[] qualifier = Bytes.toBytes("val");
+    for (int i = 0; i < flushes; i++) {
+      for (int k = 0; k < rows; k++) {
+        byte[] row = Bytes.toBytes(random.nextLong());
+        Put p = new Put(row);
+        p.add(family, qualifier, row);
+        puts.add(p);
+      }
+      ht.put(puts);
+      ht.flushCommits();
+      TEST_UTIL.flush();
+      puts.clear();
+    }
+  }
+
+  @org.junit.Rule
+  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
+    new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
+}

@@ -36,6 +36,10 @@
     <description>General client pause value. Used mostly as value to wait
     before running a retry of a failed get, region lookup, etc.</description>
   </property>
+  <property>
+    <name>hbase.defaults.for.version.skip</name>
+    <value>true</value>
+  </property>
   <property>
     <name>hbase.client.retries.number</name>
     <value>10</value>