HBASE-6033 Adding some functions to check if a table/region is in compaction (Jimmy)
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1342149 13f79535-47bb-0310-9956-ffa450edef68
commit c35206b420
parent 36bac7b88b
@@ -62,7 +62,9 @@ import org.apache.hadoop.hbase.ipc.HMasterInterface;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
@@ -72,8 +74,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRespo
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
@@ -1823,6 +1823,98 @@ public class HBaseAdmin implements Abortable, Closeable {
    }
  }

  /**
   * Get the current compaction state of a table or region.
   * It could be in a major compaction, a minor compaction, both, or none.
   *
   * @param tableNameOrRegionName table or region to check compaction state for
   * @throws IOException if a remote or network exception occurs
   * @throws InterruptedException if the thread is interrupted
   * @return the current compaction state
   */
  public CompactionState getCompactionState(final String tableNameOrRegionName)
      throws IOException, InterruptedException {
    return getCompactionState(Bytes.toBytes(tableNameOrRegionName));
  }

  /**
   * Get the current compaction state of a table or region.
   * It could be in a major compaction, a minor compaction, both, or none.
   *
   * @param tableNameOrRegionName table or region to check compaction state for
   * @throws IOException if a remote or network exception occurs
   * @throws InterruptedException if the thread is interrupted
   * @return the current compaction state
   */
  public CompactionState getCompactionState(final byte [] tableNameOrRegionName)
      throws IOException, InterruptedException {
    CompactionState state = CompactionState.NONE;
    CatalogTracker ct = getCatalogTracker();
    try {
      if (isRegionName(tableNameOrRegionName, ct)) {
        Pair<HRegionInfo, ServerName> pair =
          MetaReader.getRegion(ct, tableNameOrRegionName);
        if (pair == null || pair.getSecond() == null) {
          LOG.info("No server in .META. for " +
            Bytes.toStringBinary(tableNameOrRegionName) + "; pair=" + pair);
        } else {
          ServerName sn = pair.getSecond();
          AdminProtocol admin =
            this.connection.getAdmin(sn.getHostname(), sn.getPort());
          GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
            pair.getFirst().getRegionName(), true);
          GetRegionInfoResponse response = admin.getRegionInfo(null, request);
          return response.getCompactionState();
        }
      } else {
        final String tableName = tableNameString(tableNameOrRegionName, ct);
        List<Pair<HRegionInfo, ServerName>> pairs =
          MetaReader.getTableRegionsAndLocations(ct, tableName);
        for (Pair<HRegionInfo, ServerName> pair: pairs) {
          if (pair.getFirst().isOffline()) continue;
          if (pair.getSecond() == null) continue;
          try {
            ServerName sn = pair.getSecond();
            AdminProtocol admin =
              this.connection.getAdmin(sn.getHostname(), sn.getPort());
            GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
              pair.getFirst().getRegionName(), true);
            GetRegionInfoResponse response = admin.getRegionInfo(null, request);
            switch (response.getCompactionState()) {
            case MAJOR_AND_MINOR:
              return CompactionState.MAJOR_AND_MINOR;
            case MAJOR:
              if (state == CompactionState.MINOR) {
                return CompactionState.MAJOR_AND_MINOR;
              }
              state = CompactionState.MAJOR;
              break;
            case MINOR:
              if (state == CompactionState.MAJOR) {
                return CompactionState.MAJOR_AND_MINOR;
              }
              state = CompactionState.MINOR;
              break;
            case NONE:
            default: // nothing, continue
            }
          } catch (NotServingRegionException e) {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Trying to get compaction state of " +
                pair.getFirst() + ": " +
                StringUtils.stringifyException(e));
            }
          }
        }
      }
    } catch (ServiceException se) {
      throw ProtobufUtil.getRemoteException(se);
    } finally {
      cleanupCatalogTracker(ct);
    }
    return state;
  }

  /**
   * @see {@link #execute}
   */

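For review context, a minimal usage sketch of the new admin API above. It assumes a running cluster, a client Configuration named conf, and an existing table "myTable"; both names are illustrative, not part of this commit:

    HBaseAdmin admin = new HBaseAdmin(conf);
    // Kick off a major compaction, then poll the new API until it completes.
    admin.majorCompact("myTable");
    CompactionState state = admin.getCompactionState("myTable");
    while (state != CompactionState.NONE) {
      Thread.sleep(1000); // polling interval is arbitrary
      state = admin.getCompactionState("myTable");
    }
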
@@ -516,20 +516,36 @@ public final class RequestConverter {
  // End utilities for Client
  // Start utilities for Admin

  /**
   * Create a protocol buffer GetRegionInfoRequest for a given region name
   *
   * @param regionName the name of the region to get info
   * @return a protocol buffer GetRegionInfoRequest
   */
  public static GetRegionInfoRequest
      buildGetRegionInfoRequest(final byte[] regionName) {
    GetRegionInfoRequest.Builder builder = GetRegionInfoRequest.newBuilder();
    RegionSpecifier region = buildRegionSpecifier(
      RegionSpecifierType.REGION_NAME, regionName);
    builder.setRegion(region);
    return builder.build();
  }
  /**
   * Create a protocol buffer GetRegionInfoRequest for a given region name
   *
   * @param regionName the name of the region to get info
   * @return a protocol buffer GetRegionInfoRequest
   */
  public static GetRegionInfoRequest
      buildGetRegionInfoRequest(final byte[] regionName) {
    return buildGetRegionInfoRequest(regionName, false);
  }

  /**
   * Create a protocol buffer GetRegionInfoRequest for a given region name
   *
   * @param regionName the name of the region to get info
   * @param includeCompactionState indicate if the compaction state is requested
   * @return a protocol buffer GetRegionInfoRequest
   */
  public static GetRegionInfoRequest
      buildGetRegionInfoRequest(final byte[] regionName,
        final boolean includeCompactionState) {
    GetRegionInfoRequest.Builder builder = GetRegionInfoRequest.newBuilder();
    RegionSpecifier region = buildRegionSpecifier(
      RegionSpecifierType.REGION_NAME, regionName);
    builder.setRegion(region);
    if (includeCompactionState) {
      builder.setCompactionState(includeCompactionState);
    }
    return builder.build();
  }

  /**
   * Create a protocol buffer GetStoreFileRequest for a given region name

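A short sketch of how the two overloads above relate; the region name bytes are illustrative only:

    byte[] regionName = Bytes.toBytes("someRegionName"); // illustrative
    // Old-style call: now delegates with includeCompactionState = false.
    GetRegionInfoRequest plain =
      RequestConverter.buildGetRegionInfoRequest(regionName);
    // New-style call: also asks the region server for its compaction state.
    GetRegionInfoRequest withState =
      RequestConverter.buildGetRegionInfoRequest(regionName, true);
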
@@ -882,7 +898,6 @@ public final class RequestConverter {
  public static UnassignRegionRequest buildUnassignRegionRequest(
      final byte [] regionName, final boolean force) {
    UnassignRegionRequest.Builder builder = UnassignRegionRequest.newBuilder();
    RegionSpecifier.Builder rspec = RegionSpecifier.newBuilder();
    builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME,regionName));
    builder.setForce(force);
    return builder.build();

@@ -15,6 +15,10 @@ public final class AdminProtos {
    boolean hasRegion();
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion();
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder();

    // optional bool compactionState = 2;
    boolean hasCompactionState();
    boolean getCompactionState();
  }
  public static final class GetRegionInfoRequest extends
      com.google.protobuf.GeneratedMessage
@@ -58,8 +62,19 @@ public final class AdminProtos {
      return region_;
    }

    // optional bool compactionState = 2;
    public static final int COMPACTIONSTATE_FIELD_NUMBER = 2;
    private boolean compactionState_;
    public boolean hasCompactionState() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    public boolean getCompactionState() {
      return compactionState_;
    }

    private void initFields() {
      region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
      compactionState_ = false;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
@@ -84,6 +99,9 @@ public final class AdminProtos {
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, region_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeBool(2, compactionState_);
      }
      getUnknownFields().writeTo(output);
    }
@@ -97,6 +115,10 @@ public final class AdminProtos {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, region_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBoolSize(2, compactionState_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
@@ -125,6 +147,11 @@ public final class AdminProtos {
        result = result && getRegion()
          .equals(other.getRegion());
      }
      result = result && (hasCompactionState() == other.hasCompactionState());
      if (hasCompactionState()) {
        result = result && (getCompactionState()
          == other.getCompactionState());
      }
      result = result &&
        getUnknownFields().equals(other.getUnknownFields());
      return result;
@@ -138,6 +165,10 @@ public final class AdminProtos {
        hash = (37 * hash) + REGION_FIELD_NUMBER;
        hash = (53 * hash) + getRegion().hashCode();
      }
      if (hasCompactionState()) {
        hash = (37 * hash) + COMPACTIONSTATE_FIELD_NUMBER;
        hash = (53 * hash) + hashBoolean(getCompactionState());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
@@ -261,6 +292,8 @@ public final class AdminProtos {
          regionBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        compactionState_ = false;
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
@@ -307,6 +340,10 @@ public final class AdminProtos {
        } else {
          result.region_ = regionBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.compactionState_ = compactionState_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
@@ -326,6 +363,9 @@ public final class AdminProtos {
        if (other.hasRegion()) {
          mergeRegion(other.getRegion());
        }
        if (other.hasCompactionState()) {
          setCompactionState(other.getCompactionState());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
@@ -374,6 +414,11 @@ public final class AdminProtos {
              setRegion(subBuilder.buildPartial());
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              compactionState_ = input.readBool();
              break;
            }
          }
        }
      }
@@ -470,6 +515,27 @@ public final class AdminProtos {
        return regionBuilder_;
      }

      // optional bool compactionState = 2;
      private boolean compactionState_ ;
      public boolean hasCompactionState() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      public boolean getCompactionState() {
        return compactionState_;
      }
      public Builder setCompactionState(boolean value) {
        bitField0_ |= 0x00000002;
        compactionState_ = value;
        onChanged();
        return this;
      }
      public Builder clearCompactionState() {
        bitField0_ = (bitField0_ & ~0x00000002);
        compactionState_ = false;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:GetRegionInfoRequest)
    }

@@ -488,6 +554,10 @@ public final class AdminProtos {
    boolean hasRegionInfo();
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo();
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder();

    // optional .GetRegionInfoResponse.CompactionState compactionState = 2;
    boolean hasCompactionState();
    org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState getCompactionState();
  }
  public static final class GetRegionInfoResponse extends
      com.google.protobuf.GeneratedMessage
@@ -517,6 +587,81 @@ public final class AdminProtos {
      return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_GetRegionInfoResponse_fieldAccessorTable;
    }

    public enum CompactionState
        implements com.google.protobuf.ProtocolMessageEnum {
      NONE(0, 0),
      MINOR(1, 1),
      MAJOR(2, 2),
      MAJOR_AND_MINOR(3, 3),
      ;

      public static final int NONE_VALUE = 0;
      public static final int MINOR_VALUE = 1;
      public static final int MAJOR_VALUE = 2;
      public static final int MAJOR_AND_MINOR_VALUE = 3;


      public final int getNumber() { return value; }

      public static CompactionState valueOf(int value) {
        switch (value) {
          case 0: return NONE;
          case 1: return MINOR;
          case 2: return MAJOR;
          case 3: return MAJOR_AND_MINOR;
          default: return null;
        }
      }

      public static com.google.protobuf.Internal.EnumLiteMap<CompactionState>
          internalGetValueMap() {
        return internalValueMap;
      }
      private static com.google.protobuf.Internal.EnumLiteMap<CompactionState>
          internalValueMap =
            new com.google.protobuf.Internal.EnumLiteMap<CompactionState>() {
              public CompactionState findValueByNumber(int number) {
                return CompactionState.valueOf(number);
              }
            };

      public final com.google.protobuf.Descriptors.EnumValueDescriptor
          getValueDescriptor() {
        return getDescriptor().getValues().get(index);
      }
      public final com.google.protobuf.Descriptors.EnumDescriptor
          getDescriptorForType() {
        return getDescriptor();
      }
      public static final com.google.protobuf.Descriptors.EnumDescriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.getDescriptor().getEnumTypes().get(0);
      }

      private static final CompactionState[] VALUES = {
        NONE, MINOR, MAJOR, MAJOR_AND_MINOR,
      };

      public static CompactionState valueOf(
          com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
        if (desc.getType() != getDescriptor()) {
          throw new java.lang.IllegalArgumentException(
            "EnumValueDescriptor is not for this type.");
        }
        return VALUES[desc.getIndex()];
      }

      private final int index;
      private final int value;

      private CompactionState(int index, int value) {
        this.index = index;
        this.value = value;
      }

      // @@protoc_insertion_point(enum_scope:GetRegionInfoResponse.CompactionState)
    }

    private int bitField0_;
    // required .RegionInfo regionInfo = 1;
    public static final int REGIONINFO_FIELD_NUMBER = 1;
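The generated enum keeps the wire numbers declared in Admin.proto; a quick sketch of the mapping (values as defined above):

    CompactionState s = CompactionState.valueOf(3);       // MAJOR_AND_MINOR
    int n = CompactionState.MINOR.getNumber();            // 1
    CompactionState unknown = CompactionState.valueOf(7); // null for an undefined number
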
@@ -531,8 +676,19 @@ public final class AdminProtos {
      return regionInfo_;
    }

    // optional .GetRegionInfoResponse.CompactionState compactionState = 2;
    public static final int COMPACTIONSTATE_FIELD_NUMBER = 2;
    private org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState compactionState_;
    public boolean hasCompactionState() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState getCompactionState() {
      return compactionState_;
    }

    private void initFields() {
      regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
      compactionState_ = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
@@ -557,6 +713,9 @@ public final class AdminProtos {
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, regionInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeEnum(2, compactionState_.getNumber());
      }
      getUnknownFields().writeTo(output);
    }
@@ -570,6 +729,10 @@ public final class AdminProtos {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, regionInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeEnumSize(2, compactionState_.getNumber());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
@@ -598,6 +761,11 @@ public final class AdminProtos {
        result = result && getRegionInfo()
          .equals(other.getRegionInfo());
      }
      result = result && (hasCompactionState() == other.hasCompactionState());
      if (hasCompactionState()) {
        result = result &&
          (getCompactionState() == other.getCompactionState());
      }
      result = result &&
        getUnknownFields().equals(other.getUnknownFields());
      return result;
@@ -611,6 +779,10 @@ public final class AdminProtos {
        hash = (37 * hash) + REGIONINFO_FIELD_NUMBER;
        hash = (53 * hash) + getRegionInfo().hashCode();
      }
      if (hasCompactionState()) {
        hash = (37 * hash) + COMPACTIONSTATE_FIELD_NUMBER;
        hash = (53 * hash) + hashEnum(getCompactionState());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      return hash;
    }
@@ -734,6 +906,8 @@ public final class AdminProtos {
          regionInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        compactionState_ = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
@@ -780,6 +954,10 @@ public final class AdminProtos {
        } else {
          result.regionInfo_ = regionInfoBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.compactionState_ = compactionState_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
@@ -799,6 +977,9 @@ public final class AdminProtos {
        if (other.hasRegionInfo()) {
          mergeRegionInfo(other.getRegionInfo());
        }
        if (other.hasCompactionState()) {
          setCompactionState(other.getCompactionState());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
@@ -847,6 +1028,17 @@ public final class AdminProtos {
              setRegionInfo(subBuilder.buildPartial());
              break;
            }
            case 16: {
              int rawValue = input.readEnum();
              org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState value = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState.valueOf(rawValue);
              if (value == null) {
                unknownFields.mergeVarintField(2, rawValue);
              } else {
                bitField0_ |= 0x00000002;
                compactionState_ = value;
              }
              break;
            }
          }
        }
      }
@@ -943,6 +1135,30 @@ public final class AdminProtos {
        return regionInfoBuilder_;
      }

      // optional .GetRegionInfoResponse.CompactionState compactionState = 2;
      private org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState compactionState_ = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
      public boolean hasCompactionState() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState getCompactionState() {
        return compactionState_;
      }
      public Builder setCompactionState(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000002;
        compactionState_ = value;
        onChanged();
        return this;
      }
      public Builder clearCompactionState() {
        bitField0_ = (bitField0_ & ~0x00000002);
        compactionState_ = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:GetRegionInfoResponse)
    }

@@ -15661,77 +15877,81 @@ public final class AdminProtos {
      descriptor;
  static {
    java.lang.String[] descriptorData = {
      "\n\013Admin.proto\032\013hbase.proto\"8\n\024GetRegionI" +
      "\n\013Admin.proto\032\013hbase.proto\"Q\n\024GetRegionI" +
      "nfoRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpeci" +
      "fier\"8\n\025GetRegionInfoResponse\022\037\n\nregionI" +
      "nfo\030\001 \002(\0132\013.RegionInfo\"G\n\023GetStoreFileRe" +
      "quest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022" +
      "\016\n\006family\030\002 \003(\014\")\n\024GetStoreFileResponse\022" +
      "\021\n\tstoreFile\030\001 \003(\t\"\030\n\026GetOnlineRegionReq" +
      "uest\":\n\027GetOnlineRegionResponse\022\037\n\nregio" +
      "nInfo\030\001 \003(\0132\013.RegionInfo\"N\n\021OpenRegionRe" +
      "quest\022\033\n\006region\030\001 \003(\0132\013.RegionInfo\022\034\n\024ve",
      "rsionOfOfflineNode\030\002 \001(\r\"\234\001\n\022OpenRegionR" +
      "esponse\022<\n\014openingState\030\001 \003(\0162&.OpenRegi" +
      "onResponse.RegionOpeningState\"H\n\022RegionO" +
      "peningState\022\n\n\006OPENED\020\000\022\022\n\016ALREADY_OPENE" +
      "D\020\001\022\022\n\016FAILED_OPENING\020\002\"\232\001\n\022CloseRegionR" +
      "equest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier" +
      "\022\034\n\024versionOfClosingNode\030\002 \001(\r\022\034\n\016transi" +
      "tionInZK\030\003 \001(\010:\004true\022&\n\021destinationServe" +
      "r\030\004 \001(\0132\013.ServerName\"%\n\023CloseRegionRespo" +
      "nse\022\016\n\006closed\030\001 \002(\010\"M\n\022FlushRegionReques",
      "t\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\025\n\ri" +
      "fOlderThanTs\030\002 \001(\004\"=\n\023FlushRegionRespons" +
      "e\022\025\n\rlastFlushTime\030\001 \002(\004\022\017\n\007flushed\030\002 \001(" +
      "\010\"J\n\022SplitRegionRequest\022 \n\006region\030\001 \002(\0132" +
      "\020.RegionSpecifier\022\022\n\nsplitPoint\030\002 \001(\014\"\025\n" +
      "\023SplitRegionResponse\"G\n\024CompactRegionReq" +
      "uest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\r" +
      "\n\005major\030\002 \001(\010\"\027\n\025CompactRegionResponse\"1" +
      "\n\004UUID\022\024\n\014leastSigBits\030\001 \002(\004\022\023\n\013mostSigB" +
      "its\030\002 \002(\004\"\270\003\n\010WALEntry\022\035\n\003key\030\001 \002(\0132\020.WA",
      "LEntry.WALKey\022\037\n\004edit\030\002 \002(\0132\021.WALEntry.W" +
      "ALEdit\032~\n\006WALKey\022\031\n\021encodedRegionName\030\001 " +
      "\002(\014\022\021\n\ttableName\030\002 \002(\014\022\031\n\021logSequenceNum" +
      "ber\030\003 \002(\004\022\021\n\twriteTime\030\004 \002(\004\022\030\n\tclusterI" +
      "d\030\005 \001(\0132\005.UUID\032\353\001\n\007WALEdit\022\025\n\rkeyValueBy" +
      "tes\030\001 \003(\014\0222\n\013familyScope\030\002 \003(\0132\035.WALEntr" +
      "y.WALEdit.FamilyScope\032M\n\013FamilyScope\022\016\n\006" +
      "family\030\001 \002(\014\022.\n\tscopeType\030\002 \002(\0162\033.WALEnt" +
      "ry.WALEdit.ScopeType\"F\n\tScopeType\022\033\n\027REP" +
      "LICATION_SCOPE_LOCAL\020\000\022\034\n\030REPLICATION_SC",
      "OPE_GLOBAL\020\001\"4\n\030ReplicateWALEntryRequest" +
      "\022\030\n\005entry\030\001 \003(\0132\t.WALEntry\"\033\n\031ReplicateW" +
      "ALEntryResponse\"\026\n\024RollWALWriterRequest\"" +
      ".\n\025RollWALWriterResponse\022\025\n\rregionToFlus" +
      "h\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006reason\030\001" +
      " \002(\t\"\024\n\022StopServerResponse\"\026\n\024GetServerI" +
      "nfoRequest\"@\n\nServerInfo\022\037\n\nserverName\030\001" +
      " \002(\0132\013.ServerName\022\021\n\twebuiPort\030\002 \001(\r\"8\n\025" +
      "GetServerInfoResponse\022\037\n\nserverInfo\030\001 \002(" +
      "\0132\013.ServerInfo2\371\005\n\014AdminService\022>\n\rgetRe",
      "gionInfo\022\025.GetRegionInfoRequest\032\026.GetReg" +
      "ionInfoResponse\022;\n\014getStoreFile\022\024.GetSto" +
      "reFileRequest\032\025.GetStoreFileResponse\022D\n\017" +
      "getOnlineRegion\022\027.GetOnlineRegionRequest" +
      "\032\030.GetOnlineRegionResponse\0225\n\nopenRegion" +
      "\022\022.OpenRegionRequest\032\023.OpenRegionRespons" +
      "e\0228\n\013closeRegion\022\023.CloseRegionRequest\032\024." +
      "CloseRegionResponse\0228\n\013flushRegion\022\023.Flu" +
      "shRegionRequest\032\024.FlushRegionResponse\0228\n" +
      "\013splitRegion\022\023.SplitRegionRequest\032\024.Spli",
      "tRegionResponse\022>\n\rcompactRegion\022\025.Compa" +
      "ctRegionRequest\032\026.CompactRegionResponse\022" +
      "J\n\021replicateWALEntry\022\031.ReplicateWALEntry" +
      "Request\032\032.ReplicateWALEntryResponse\022>\n\rr" +
      "ollWALWriter\022\025.RollWALWriterRequest\032\026.Ro" +
      "llWALWriterResponse\022>\n\rgetServerInfo\022\025.G" +
      "etServerInfoRequest\032\026.GetServerInfoRespo" +
      "nse\0225\n\nstopServer\022\022.StopServerRequest\032\023." +
      "StopServerResponseBA\n*org.apache.hadoop." +
      "hbase.protobuf.generatedB\013AdminProtosH\001\210",
      "\001\001\240\001\001"
      "fier\022\027\n\017compactionState\030\002 \001(\010\"\301\001\n\025GetReg" +
      "ionInfoResponse\022\037\n\nregionInfo\030\001 \002(\0132\013.Re" +
      "gionInfo\022?\n\017compactionState\030\002 \001(\0162&.GetR" +
      "egionInfoResponse.CompactionState\"F\n\017Com" +
      "pactionState\022\010\n\004NONE\020\000\022\t\n\005MINOR\020\001\022\t\n\005MAJ" +
      "OR\020\002\022\023\n\017MAJOR_AND_MINOR\020\003\"G\n\023GetStoreFil" +
      "eRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifi" +
      "er\022\016\n\006family\030\002 \003(\014\")\n\024GetStoreFileRespon",
      "se\022\021\n\tstoreFile\030\001 \003(\t\"\030\n\026GetOnlineRegion" +
      "Request\":\n\027GetOnlineRegionResponse\022\037\n\nre" +
      "gionInfo\030\001 \003(\0132\013.RegionInfo\"N\n\021OpenRegio" +
      "nRequest\022\033\n\006region\030\001 \003(\0132\013.RegionInfo\022\034\n" +
      "\024versionOfOfflineNode\030\002 \001(\r\"\234\001\n\022OpenRegi" +
      "onResponse\022<\n\014openingState\030\001 \003(\0162&.OpenR" +
      "egionResponse.RegionOpeningState\"H\n\022Regi" +
      "onOpeningState\022\n\n\006OPENED\020\000\022\022\n\016ALREADY_OP" +
      "ENED\020\001\022\022\n\016FAILED_OPENING\020\002\"\232\001\n\022CloseRegi" +
      "onRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecif",
      "ier\022\034\n\024versionOfClosingNode\030\002 \001(\r\022\034\n\016tra" +
      "nsitionInZK\030\003 \001(\010:\004true\022&\n\021destinationSe" +
      "rver\030\004 \001(\0132\013.ServerName\"%\n\023CloseRegionRe" +
      "sponse\022\016\n\006closed\030\001 \002(\010\"M\n\022FlushRegionReq" +
      "uest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\025" +
      "\n\rifOlderThanTs\030\002 \001(\004\"=\n\023FlushRegionResp" +
      "onse\022\025\n\rlastFlushTime\030\001 \002(\004\022\017\n\007flushed\030\002" +
      " \001(\010\"J\n\022SplitRegionRequest\022 \n\006region\030\001 \002" +
      "(\0132\020.RegionSpecifier\022\022\n\nsplitPoint\030\002 \001(\014" +
      "\"\025\n\023SplitRegionResponse\"G\n\024CompactRegion",
      "Request\022 \n\006region\030\001 \002(\0132\020.RegionSpecifie" +
      "r\022\r\n\005major\030\002 \001(\010\"\027\n\025CompactRegionRespons" +
      "e\"1\n\004UUID\022\024\n\014leastSigBits\030\001 \002(\004\022\023\n\013mostS" +
      "igBits\030\002 \002(\004\"\270\003\n\010WALEntry\022\035\n\003key\030\001 \002(\0132\020" +
      ".WALEntry.WALKey\022\037\n\004edit\030\002 \002(\0132\021.WALEntr" +
      "y.WALEdit\032~\n\006WALKey\022\031\n\021encodedRegionName" +
      "\030\001 \002(\014\022\021\n\ttableName\030\002 \002(\014\022\031\n\021logSequence" +
      "Number\030\003 \002(\004\022\021\n\twriteTime\030\004 \002(\004\022\030\n\tclust" +
      "erId\030\005 \001(\0132\005.UUID\032\353\001\n\007WALEdit\022\025\n\rkeyValu" +
      "eBytes\030\001 \003(\014\0222\n\013familyScope\030\002 \003(\0132\035.WALE",
      "ntry.WALEdit.FamilyScope\032M\n\013FamilyScope\022" +
      "\016\n\006family\030\001 \002(\014\022.\n\tscopeType\030\002 \002(\0162\033.WAL" +
      "Entry.WALEdit.ScopeType\"F\n\tScopeType\022\033\n\027" +
      "REPLICATION_SCOPE_LOCAL\020\000\022\034\n\030REPLICATION" +
      "_SCOPE_GLOBAL\020\001\"4\n\030ReplicateWALEntryRequ" +
      "est\022\030\n\005entry\030\001 \003(\0132\t.WALEntry\"\033\n\031Replica" +
      "teWALEntryResponse\"\026\n\024RollWALWriterReque" +
      "st\".\n\025RollWALWriterResponse\022\025\n\rregionToF" +
      "lush\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006reaso" +
      "n\030\001 \002(\t\"\024\n\022StopServerResponse\"\026\n\024GetServ",
      "erInfoRequest\"@\n\nServerInfo\022\037\n\nserverNam" +
      "e\030\001 \002(\0132\013.ServerName\022\021\n\twebuiPort\030\002 \001(\r\"" +
      "8\n\025GetServerInfoResponse\022\037\n\nserverInfo\030\001" +
      " \002(\0132\013.ServerInfo2\371\005\n\014AdminService\022>\n\rge" +
      "tRegionInfo\022\025.GetRegionInfoRequest\032\026.Get" +
      "RegionInfoResponse\022;\n\014getStoreFile\022\024.Get" +
      "StoreFileRequest\032\025.GetStoreFileResponse\022" +
      "D\n\017getOnlineRegion\022\027.GetOnlineRegionRequ" +
      "est\032\030.GetOnlineRegionResponse\0225\n\nopenReg" +
      "ion\022\022.OpenRegionRequest\032\023.OpenRegionResp",
      "onse\0228\n\013closeRegion\022\023.CloseRegionRequest" +
      "\032\024.CloseRegionResponse\0228\n\013flushRegion\022\023." +
      "FlushRegionRequest\032\024.FlushRegionResponse" +
      "\0228\n\013splitRegion\022\023.SplitRegionRequest\032\024.S" +
      "plitRegionResponse\022>\n\rcompactRegion\022\025.Co" +
      "mpactRegionRequest\032\026.CompactRegionRespon" +
      "se\022J\n\021replicateWALEntry\022\031.ReplicateWALEn" +
      "tryRequest\032\032.ReplicateWALEntryResponse\022>" +
      "\n\rrollWALWriter\022\025.RollWALWriterRequest\032\026" +
      ".RollWALWriterResponse\022>\n\rgetServerInfo\022",
      "\025.GetServerInfoRequest\032\026.GetServerInfoRe" +
      "sponse\0225\n\nstopServer\022\022.StopServerRequest" +
      "\032\023.StopServerResponseBA\n*org.apache.hado" +
      "op.hbase.protobuf.generatedB\013AdminProtos" +
      "H\001\210\001\001\240\001\001"
    };
    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -15743,7 +15963,7 @@ public final class AdminProtos {
      internal_static_GetRegionInfoRequest_fieldAccessorTable = new
        com.google.protobuf.GeneratedMessage.FieldAccessorTable(
          internal_static_GetRegionInfoRequest_descriptor,
          new java.lang.String[] { "Region", },
          new java.lang.String[] { "Region", "CompactionState", },
          org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest.class,
          org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest.Builder.class);
      internal_static_GetRegionInfoResponse_descriptor =
@@ -15751,7 +15971,7 @@ public final class AdminProtos {
      internal_static_GetRegionInfoResponse_fieldAccessorTable = new
        com.google.protobuf.GeneratedMessage.FieldAccessorTable(
          internal_static_GetRegionInfoResponse_descriptor,
          new java.lang.String[] { "RegionInfo", },
          new java.lang.String[] { "RegionInfo", "CompactionState", },
          org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.class,
          org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.Builder.class);
      internal_static_GetStoreFileRequest_descriptor =

@@ -30,8 +30,6 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;

import com.google.common.base.Preconditions;

@@ -169,6 +169,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.handler.CloseMetaHandler;
import org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler;
import org.apache.hadoop.hbase.regionserver.handler.CloseRootHandler;
@@ -3288,6 +3289,10 @@ public class HRegionServer implements ClientProtocol,
      HRegionInfo info = region.getRegionInfo();
      GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
      builder.setRegionInfo(HRegionInfo.convert(info));
      if (request.hasCompactionState() && request.getCompactionState()) {
        builder.setCompactionState(
          CompactionRequest.getCompactionState(info.getRegionId()));
      }
      return builder.build();
    } catch (IOException ie) {
      throw new ServiceException(ie);

@@ -1315,10 +1315,14 @@ public class Store extends SchemaConfigured implements HeapSize {
    } finally {
      this.lock.readLock().unlock();
    }
    if (ret != null) {
      CompactionRequest.preRequest(ret);
    }
    return ret;
  }

  public void finishRequest(CompactionRequest cr) {
    CompactionRequest.postRequest(cr);
    cr.finishRequest();
    synchronized (filesCompacting) {
      filesCompacting.removeAll(cr.getFiles());

@@ -21,13 +21,16 @@ package org.apache.hadoop.hbase.regionserver.compactions;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.Store;
@@ -57,6 +60,14 @@ public class CompactionRequest implements Comparable<CompactionRequest>,
    private final Long timeInNanos;
    private HRegionServer server = null;

    /**
     * Maps to track the number of compactions requested per region (id)
     */
    private static final ConcurrentHashMap<Long, AtomicInteger>
      majorCompactions = new ConcurrentHashMap<Long, AtomicInteger>();
    private static final ConcurrentHashMap<Long, AtomicInteger>
      minorCompactions = new ConcurrentHashMap<Long, AtomicInteger>();

    public CompactionRequest(HRegion r, Store s,
        CompactSelection files, boolean isMajor, int p) {
      Preconditions.checkNotNull(r);
@@ -75,6 +86,58 @@ public class CompactionRequest implements Comparable<CompactionRequest>,
      this.timeInNanos = System.nanoTime();
    }

    /**
     * Find out if a given region is in compaction now.
     *
     * @param regionId the id of the region to check
     * @return the current compaction state of the region
     */
    public static CompactionState getCompactionState(
        final long regionId) {
      Long key = Long.valueOf(regionId);
      AtomicInteger major = majorCompactions.get(key);
      AtomicInteger minor = minorCompactions.get(key);
      int state = 0;
      if (minor != null && minor.get() > 0) {
        state += 1; // use 1 to indicate minor here
      }
      if (major != null && major.get() > 0) {
        state += 2; // use 2 to indicate major here
      }
      switch (state) {
      case 3: // 3 = 2 + 1, so both major and minor
        return CompactionState.MAJOR_AND_MINOR;
      case 2:
        return CompactionState.MAJOR;
      case 1:
        return CompactionState.MINOR;
      default:
        return CompactionState.NONE;
      }
    }

    public static void preRequest(final CompactionRequest cr) {
      Long key = Long.valueOf(cr.getHRegion().getRegionId());
      ConcurrentHashMap<Long, AtomicInteger> compactions =
        cr.isMajor() ? majorCompactions : minorCompactions;
      AtomicInteger count = compactions.get(key);
      if (count == null) {
        compactions.putIfAbsent(key, new AtomicInteger(0));
        count = compactions.get(key);
      }
      count.incrementAndGet();
    }

    public static void postRequest(final CompactionRequest cr) {
      Long key = Long.valueOf(cr.getHRegion().getRegionId());
      ConcurrentHashMap<Long, AtomicInteger> compactions =
        cr.isMajor() ? majorCompactions : minorCompactions;
      AtomicInteger count = compactions.get(key);
      if (count != null) {
        count.decrementAndGet();
      }
    }

    public void finishRequest() {
      this.compactSelection.finishRequest();
    }

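The state derivation above is plain arithmetic over the two per-region counters; an equivalent standalone sketch with hypothetical counts (not read from a live region):

    int minorCount = 1; // pretend one minor compaction is running
    int majorCount = 1; // and one major compaction as well
    int state = (minorCount > 0 ? 1 : 0) + (majorCount > 0 ? 2 : 0);
    // 0 -> NONE, 1 -> MINOR, 2 -> MAJOR, 3 -> MAJOR_AND_MINOR
    assert state == 3; // both running, so MAJOR_AND_MINOR
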
@@ -28,10 +28,19 @@ import "hbase.proto";

message GetRegionInfoRequest {
  required RegionSpecifier region = 1;
  optional bool compactionState = 2;
}

message GetRegionInfoResponse {
  required RegionInfo regionInfo = 1;
  optional CompactionState compactionState = 2;

  enum CompactionState {
    NONE = 0;
    MINOR = 1;
    MAJOR = 2;
    MAJOR_AND_MINOR = 3;
  }
}

/**

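Both new fields are optional, which keeps old clients and servers wire-compatible; a hedged round-trip sketch using the generated classes (the region value is illustrative, and RegionSpecifier's type/value fields come from hbase.proto):

    RegionSpecifier region = RegionSpecifier.newBuilder()
      .setType(RegionSpecifierType.REGION_NAME)
      .setValue(ByteString.copyFromUtf8("someRegion")) // illustrative value
      .build();
    GetRegionInfoRequest req = GetRegionInfoRequest.newBuilder()
      .setRegion(region).build();
    // The optional field is absent unless a new-style caller sets it:
    assert !req.hasCompactionState();
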
@@ -151,6 +151,11 @@
    <td><%= hbadmin.isTableEnabled(table.getTableName()) %></td>
    <td>Is the table enabled</td>
</tr>
<tr>
    <td>Compaction</td>
    <td><%= hbadmin.getCompactionState(table.getTableName()) %></td>
    <td>Is the table compacting</td>
</tr>
<% if (showFragmentation) { %>
<tr>
    <td>Fragmentation</td>

@@ -0,0 +1,171 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.TestRegionSplitCalculator;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/** Unit tests for retrieving table/region compaction state */
@Category(MediumTests.class)
public class TestCompactionState {
  final static Log LOG = LogFactory.getLog(TestCompactionState.class);
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private final static Random random = new Random();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test(timeout=60000)
  public void testMajorCompaction() throws IOException, InterruptedException {
    compaction("testMajorCompaction", 8, CompactionState.MAJOR);
  }

  @Test(timeout=60000)
  public void testMinorCompaction() throws IOException, InterruptedException {
    compaction("testMinorCompaction", 15, CompactionState.MINOR);
  }

  /**
   * Load data to a table, flush it to disk, trigger compaction,
   * confirm the compaction state is right and wait till it is done.
   *
   * @param tableName name of the table to compact
   * @param flushes number of flushes, i.e. store files to create
   * @param expectedState the compaction state expected while compacting
   * @throws IOException if a remote or network exception occurs
   * @throws InterruptedException if the thread is interrupted
   */
  private void compaction(final String tableName, final int flushes,
      final CompactionState expectedState) throws IOException, InterruptedException {
    // Create a table with regions
    byte [] table = Bytes.toBytes(tableName);
    byte [] family = Bytes.toBytes("family");
    HTable ht = null;
    try {
      ht = TEST_UTIL.createTable(table, family);
      loadData(ht, family, 3000, flushes);
      HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
      List<HRegion> regions = rs.getOnlineRegions(table);
      int countBefore = countStoreFiles(regions, family);
      assertTrue(countBefore > 0); // there should be some data files
      HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
      if (expectedState == CompactionState.MINOR) {
        admin.compact(tableName);
      } else {
        admin.majorCompact(table);
      }
      long curt = System.currentTimeMillis();
      long waitTime = 5000;
      long endt = curt + waitTime;
      CompactionState state = admin.getCompactionState(table);
      while (state == CompactionState.NONE && curt < endt) {
        Thread.sleep(10);
        state = admin.getCompactionState(table);
        curt = System.currentTimeMillis();
      }
      // Now, should have the right compaction state,
      // otherwise, the compaction should have already been done
      if (expectedState != state) {
        for (HRegion region: regions) {
          state = CompactionRequest.getCompactionState(region.getRegionId());
          assertEquals(CompactionState.NONE, state);
        }
      } else {
        curt = System.currentTimeMillis();
        waitTime = 20000;
        endt = curt + waitTime;
        state = admin.getCompactionState(table);
        while (state != CompactionState.NONE && curt < endt) {
          Thread.sleep(10);
          state = admin.getCompactionState(table);
          curt = System.currentTimeMillis();
        }
        // Now, compaction should be done.
        assertEquals(CompactionState.NONE, state);
      }
      int countAfter = countStoreFiles(regions, family);
      assertTrue(countAfter < countBefore);
      if (expectedState == CompactionState.MAJOR) assertTrue(1 == countAfter);
      else assertTrue(1 < countAfter);
    } finally {
      if (ht != null) {
        TEST_UTIL.deleteTable(table);
      }
    }
  }

  private static int countStoreFiles(
      List<HRegion> regions, final byte[] family) {
    int count = 0;
    for (HRegion region: regions) {
      count += region.getStoreFileList(new byte[][]{family}).size();
    }
    return count;
  }

  private static void loadData(final HTable ht, final byte[] family,
      final int rows, final int flushes) throws IOException {
    List<Put> puts = new ArrayList<Put>(rows);
    byte[] qualifier = Bytes.toBytes("val");
    for (int i = 0; i < flushes; i++) {
      for (int k = 0; k < rows; k++) {
        byte[] row = Bytes.toBytes(random.nextLong());
        Put p = new Put(row);
        p.add(family, qualifier, row);
        puts.add(p);
      }
      ht.put(puts);
      ht.flushCommits();
      TEST_UTIL.flush();
      puts.clear();
    }
  }

  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
    new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}

@@ -36,6 +36,10 @@
    <description>General client pause value.  Used mostly as value to wait
    before running a retry of a failed get, region lookup, etc.</description>
  </property>
  <property>
    <name>hbase.defaults.for.version.skip</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.client.retries.number</name>
    <value>10</value>