HBASE-12859 New master API to track major compaction completion.

Lars Hofhansl 2015-01-29 13:58:27 -08:00
parent 6bfa8ea977
commit 1270e590d1
22 changed files with 2272 additions and 116 deletions

View File

@@ -268,6 +268,29 @@ public class ClusterStatus extends VersionedWritable {
return masterCoprocessors;
}
public long getLastMajorCompactionTsForTable(TableName table) {
long result = Long.MAX_VALUE;
for (ServerName server : getServers()) {
ServerLoad load = getLoad(server);
for (RegionLoad rl : load.getRegionsLoad().values()) {
if (table.equals(HRegionInfo.getTable(rl.getName()))) {
result = Math.min(result, rl.getLastMajorCompactionTs());
}
}
}
return result == Long.MAX_VALUE ? 0 : result;
}
public long getLastMajorCompactionTsForRegion(final byte[] region) {
for (ServerName server : getServers()) {
ServerLoad load = getLoad(server);
RegionLoad rl = load.getRegionsLoad().get(region);
if (rl != null) {
return rl.getLastMajorCompactionTs();
}
}
return 0;
}
public boolean isBalancerOn() {
return balancerOn != null && balancerOn;

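The two ClusterStatus accessors added above aggregate the per-region values that each region server reports in its ServerLoad. A minimal standalone sketch of the table-level rule follows (illustrative only, not part of the commit; the class and method names are made up): the oldest reported timestamp wins, a region that reports 0 (no major-compacted HFile yet) pulls the whole table down to 0, and a table with no regions also yields 0.

public class CompactionTsAggregationSketch {
  // Mirrors the loop in getLastMajorCompactionTsForTable above.
  static long oldestMajorCompactionTs(long[] perRegionTimestamps) {
    long result = Long.MAX_VALUE;
    for (long ts : perRegionTimestamps) {
      result = Math.min(result, ts); // a region reporting 0 drags the table result to 0
    }
    return result == Long.MAX_VALUE ? 0 : result; // no regions at all -> 0
  }
}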
View File

@@ -169,6 +169,14 @@ public class RegionLoad {
}
return 0.0f;
}
/**
* @return the timestamp of the oldest hfile for any store of this region.
*/
public long getLastMajorCompactionTs() {
return regionLoadPB.getLastMajorCompactionTs();
}
/**
* @see java.lang.Object#toString()
*/
@@ -180,6 +188,8 @@ public class RegionLoad {
this.getStorefiles());
sb = Strings.appendKeyValue(sb, "storefileUncompressedSizeMB",
this.getStoreUncompressedSizeMB());
sb = Strings.appendKeyValue(sb, "lastMajorCompactionTimestamp",
this.getLastMajorCompactionTs());
sb = Strings.appendKeyValue(sb, "storefileSizeMB", sb = Strings.appendKeyValue(sb, "storefileSizeMB",
this.getStorefileSizeMB()); this.getStorefileSizeMB());
if (this.getStoreUncompressedSizeMB() != 0) { if (this.getStoreUncompressedSizeMB() != 0) {

View File

@@ -957,6 +957,32 @@ public interface Admin extends Abortable, Closeable {
AdminProtos.GetRegionInfoResponse.CompactionState getCompactionStateForRegion(
final byte[] regionName) throws IOException;
/**
* Get the timestamp of the last major compaction for the passed table
*
* The timestamp of the oldest HFile resulting from a major compaction of that table,
* or 0 if no such HFile could be found.
*
* @param tableName table to examine
* @return the last major compaction timestamp or 0
* @throws IOException if a remote or network exception occurs
*/
long getLastMajorCompactionTimestamp(final TableName tableName)
throws IOException;
/**
* Get the timestamp of the last major compaction for the passed region.
*
* The timestamp of the oldest HFile resulting from a major compaction of that region,
* or 0 if no such HFile could be found.
*
* @param regionName region to examine
* @return the last major compaction timestamp or 0
* @throws IOException if a remote or network exception occurs
*/
long getLastMajorCompactionTimestampForRegion(final byte[] regionName)
throws IOException;
/**
* Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
* taken. If the table is disabled, an offline snapshot is taken. Snapshots are considered unique

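A hedged usage sketch of the two Admin methods declared above. The connection setup and the table name "t1" are assumptions for illustration; getTableRegions is a pre-existing Admin call used here only to obtain a region name to pass to the region-level variant.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MajorCompactionTimestampExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("t1"); // illustrative table name
      // 0 if no HFile produced by a major compaction could be found
      long tableTs = admin.getLastMajorCompactionTimestamp(tn);
      List<HRegionInfo> regions = admin.getTableRegions(tn);
      long regionTs =
          admin.getLastMajorCompactionTimestampForRegion(regions.get(0).getRegionName());
      System.out.println("table=" + tableTs + ", first region=" + regionTs);
    }
  }
}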
View File

@@ -136,6 +136,9 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescript
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse;
@@ -2037,6 +2040,20 @@ final class ConnectionManager {
throws ServiceException {
return stub.setQuota(controller, request);
}
@Override
public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(
RpcController controller, MajorCompactionTimestampRequest request)
throws ServiceException {
return stub.getLastMajorCompactionTimestamp(controller, request);
}
@Override
public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
RpcController controller, MajorCompactionTimestampForRegionRequest request)
throws ServiceException {
return stub.getLastMajorCompactionTimestampForRegion(controller, request);
}
};
}

View File

@@ -83,6 +83,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfiguratio
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
@@ -115,6 +116,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRes
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
@@ -3771,4 +3774,33 @@ public class HBaseAdmin implements Admin {
throw new IOException("Failed to get master info port from MasterAddressTracker", e);
}
}
@Override
public long getLastMajorCompactionTimestamp(final TableName tableName) throws IOException {
return executeCallable(new MasterCallable<Long>(getConnection()) {
@Override
public Long call(int callTimeout) throws ServiceException {
MajorCompactionTimestampRequest req =
MajorCompactionTimestampRequest.newBuilder()
.setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
return master.getLastMajorCompactionTimestamp(null, req).getCompactionTimestamp();
}
});
}
@Override
public long getLastMajorCompactionTimestampForRegion(final byte[] regionName) throws IOException {
return executeCallable(new MasterCallable<Long>(getConnection()) {
@Override
public Long call(int callTimeout) throws ServiceException {
MajorCompactionTimestampForRegionRequest req =
MajorCompactionTimestampForRegionRequest
.newBuilder()
.setRegion(
RequestConverter
.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)).build();
return master.getLastMajorCompactionTimestampForRegion(null, req).getCompactionTimestamp();
}
});
}
}

View File

@@ -56,6 +56,7 @@ public class HFileContext implements HeapSize, Cloneable {
private DataBlockEncoding encoding = DataBlockEncoding.NONE;
/** Encryption algorithm and key used */
private Encryption.Context cryptoContext = Encryption.Context.NONE;
private long fileCreateTime;
//Empty constructor. Go with setters
public HFileContext() {
@@ -76,12 +77,13 @@ public class HFileContext implements HeapSize, Cloneable {
this.blocksize = context.blocksize;
this.encoding = context.encoding;
this.cryptoContext = context.cryptoContext;
this.fileCreateTime = context.fileCreateTime;
}
public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
Compression.Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
int bytesPerChecksum, int blockSize, DataBlockEncoding encoding,
Encryption.Context cryptoContext) { Encryption.Context cryptoContext, long fileCreateTime) {
this.usesHBaseChecksum = useHBaseChecksum;
this.includesMvcc = includesMvcc;
this.includesTags = includesTags;
@@ -94,6 +96,7 @@ public class HFileContext implements HeapSize, Cloneable {
this.encoding = encoding;
}
this.cryptoContext = cryptoContext;
this.fileCreateTime = fileCreateTime;
}
/**
@@ -141,6 +144,10 @@ public class HFileContext implements HeapSize, Cloneable {
this.includesTags = includesTags;
}
public void setFileCreateTime(long fileCreateTime) {
this.fileCreateTime = fileCreateTime;
}
public boolean isCompressTags() {
return compressTags;
}
@@ -161,6 +168,10 @@ public class HFileContext implements HeapSize, Cloneable {
return blocksize;
}
public long getFileCreateTime() {
return fileCreateTime;
}
public DataBlockEncoding getDataBlockEncoding() {
return encoding;
}
@@ -189,7 +200,8 @@ public class HFileContext implements HeapSize, Cloneable {
4 * ClassSize.REFERENCE +
2 * Bytes.SIZEOF_INT +
// usesHBaseChecksum, includesMvcc, includesTags and compressTags
4 * Bytes.SIZEOF_BOOLEAN); 4 * Bytes.SIZEOF_BOOLEAN +
Bytes.SIZEOF_LONG);
return size;
}

View File

@@ -52,6 +52,7 @@ public class HFileContextBuilder {
private DataBlockEncoding encoding = DataBlockEncoding.NONE;
/** Crypto context */
private Encryption.Context cryptoContext = Encryption.Context.NONE;
private long fileCreateTime = 0;
public HFileContextBuilder withHBaseCheckSum(boolean useHBaseCheckSum) {
this.usesHBaseChecksum = useHBaseCheckSum;
@@ -103,8 +104,14 @@ public class HFileContextBuilder {
return this;
}
public HFileContextBuilder withCreateTime(long fileCreateTime) {
this.fileCreateTime = fileCreateTime;
return this;
}
public HFileContext build() {
return new HFileContext(usesHBaseChecksum, includesMvcc, includesTags, compression,
compressTags, checksumType, bytesPerChecksum, blocksize, encoding, cryptoContext); compressTags, checksumType, bytesPerChecksum, blocksize, encoding, cryptoContext,
fileCreateTime);
}
}

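A small sketch of stamping the creation time through the new builder method (assumptions only, not from the commit; the wrapper class is made up). The HStore change later in this diff does the same thing when new store files are written, and AbstractHFileWriter persists the value into the file's FileInfo as CREATE_TIME_TS.

import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class CreateTimeContextSketch {
  static HFileContext newWriterContext() {
    return new HFileContextBuilder()
        .withHBaseCheckSum(true) // pre-existing builder option
        .withCreateTime(EnvironmentEdgeManager.currentTime()) // new in this commit
        .build();
  }
}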
View File

@@ -2171,6 +2171,16 @@ public final class ClusterStatusProtos {
* </pre>
*/
float getDataLocality();
// optional uint64 last_major_compaction_ts = 17 [default = 0];
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
boolean hasLastMajorCompactionTs();
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
long getLastMajorCompactionTs();
}
/**
* Protobuf type {@code RegionLoad}
@@ -2311,6 +2321,11 @@ public final class ClusterStatusProtos {
dataLocality_ = input.readFloat();
break;
}
case 136: {
bitField0_ |= 0x00010000;
lastMajorCompactionTs_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -2753,6 +2768,22 @@ public final class ClusterStatusProtos {
return dataLocality_;
}
// optional uint64 last_major_compaction_ts = 17 [default = 0];
public static final int LAST_MAJOR_COMPACTION_TS_FIELD_NUMBER = 17;
private long lastMajorCompactionTs_;
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
public boolean hasLastMajorCompactionTs() {
return ((bitField0_ & 0x00010000) == 0x00010000);
}
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
public long getLastMajorCompactionTs() {
return lastMajorCompactionTs_;
}
private void initFields() {
regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
stores_ = 0;
@@ -2770,6 +2801,7 @@ public final class ClusterStatusProtos {
totalStaticBloomSizeKB_ = 0;
completeSequenceId_ = 0L;
dataLocality_ = 0F;
lastMajorCompactionTs_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -2839,6 +2871,9 @@ public final class ClusterStatusProtos {
if (((bitField0_ & 0x00008000) == 0x00008000)) {
output.writeFloat(16, dataLocality_);
}
if (((bitField0_ & 0x00010000) == 0x00010000)) {
output.writeUInt64(17, lastMajorCompactionTs_);
}
getUnknownFields().writeTo(output);
}
@@ -2912,6 +2947,10 @@ public final class ClusterStatusProtos {
size += com.google.protobuf.CodedOutputStream
.computeFloatSize(16, dataLocality_);
}
if (((bitField0_ & 0x00010000) == 0x00010000)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(17, lastMajorCompactionTs_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -3014,6 +3053,11 @@ public final class ClusterStatusProtos {
if (hasDataLocality()) {
result = result && (Float.floatToIntBits(getDataLocality()) == Float.floatToIntBits(other.getDataLocality()));
}
result = result && (hasLastMajorCompactionTs() == other.hasLastMajorCompactionTs());
if (hasLastMajorCompactionTs()) {
result = result && (getLastMajorCompactionTs()
== other.getLastMajorCompactionTs());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -3092,6 +3136,10 @@ public final class ClusterStatusProtos {
hash = (53 * hash) + Float.floatToIntBits(
getDataLocality());
}
if (hasLastMajorCompactionTs()) {
hash = (37 * hash) + LAST_MAJOR_COMPACTION_TS_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLastMajorCompactionTs());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -3238,6 +3286,8 @@ public final class ClusterStatusProtos {
bitField0_ = (bitField0_ & ~0x00004000);
dataLocality_ = 0F;
bitField0_ = (bitField0_ & ~0x00008000);
lastMajorCompactionTs_ = 0L;
bitField0_ = (bitField0_ & ~0x00010000);
return this;
}
@@ -3334,6 +3384,10 @@ public final class ClusterStatusProtos {
to_bitField0_ |= 0x00008000;
}
result.dataLocality_ = dataLocality_;
if (((from_bitField0_ & 0x00010000) == 0x00010000)) {
to_bitField0_ |= 0x00010000;
}
result.lastMajorCompactionTs_ = lastMajorCompactionTs_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -3398,6 +3452,9 @@ public final class ClusterStatusProtos {
if (other.hasDataLocality()) {
setDataLocality(other.getDataLocality());
}
if (other.hasLastMajorCompactionTs()) {
setLastMajorCompactionTs(other.getLastMajorCompactionTs());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -4337,6 +4394,39 @@ public final class ClusterStatusProtos {
return this;
}
// optional uint64 last_major_compaction_ts = 17 [default = 0];
private long lastMajorCompactionTs_ ;
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
public boolean hasLastMajorCompactionTs() {
return ((bitField0_ & 0x00010000) == 0x00010000);
}
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
public long getLastMajorCompactionTs() {
return lastMajorCompactionTs_;
}
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
public Builder setLastMajorCompactionTs(long value) {
bitField0_ |= 0x00010000;
lastMajorCompactionTs_ = value;
onChanged();
return this;
}
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
public Builder clearLastMajorCompactionTs() {
bitField0_ = (bitField0_ & ~0x00010000);
lastMajorCompactionTs_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:RegionLoad)
}
@@ -10472,7 +10562,7 @@ public final class ClusterStatusProtos {
"PLITTING_NEW\020\r\022\017\n\013MERGING_NEW\020\016\"X\n\022Regio",
"nInTransition\022\036\n\004spec\030\001 \002(\0132\020.RegionSpec" +
"ifier\022\"\n\014region_state\030\002 \002(\0132\014.RegionStat" +
"e\"\347\003\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" + "e\"\214\004\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" +
"(\0132\020.RegionSpecifier\022\016\n\006stores\030\002 \001(\r\022\022\n\n" + "(\0132\020.RegionSpecifier\022\016\n\006stores\030\002 \001(\r\022\022\n\n" +
"storefiles\030\003 \001(\r\022\"\n\032store_uncompressed_s" + "storefiles\030\003 \001(\r\022\"\n\032store_uncompressed_s" +
"ize_MB\030\004 \001(\r\022\031\n\021storefile_size_MB\030\005 \001(\r\022" + "ize_MB\030\004 \001(\r\022\031\n\021storefile_size_MB\030\005 \001(\r\022" +
@ -10484,26 +10574,27 @@ public final class ClusterStatusProtos {
"\030\014 \001(\r\022\"\n\032total_static_index_size_KB\030\r \001" + "\030\014 \001(\r\022\"\n\032total_static_index_size_KB\030\r \001" +
"(\r\022\"\n\032total_static_bloom_size_KB\030\016 \001(\r\022\034" + "(\r\022\"\n\032total_static_bloom_size_KB\030\016 \001(\r\022\034" +
"\n\024complete_sequence_id\030\017 \001(\004\022\025\n\rdata_loc" + "\n\024complete_sequence_id\030\017 \001(\004\022\025\n\rdata_loc" +
"ality\030\020 \001(\002\"\212\002\n\nServerLoad\022\032\n\022number_of_" + "ality\030\020 \001(\002\022#\n\030last_major_compaction_ts\030" +
"requests\030\001 \001(\r\022 \n\030total_number_of_reques" + "\021 \001(\004:\0010\"\212\002\n\nServerLoad\022\032\n\022number_of_req" +
"ts\030\002 \001(\r\022\024\n\014used_heap_MB\030\003 \001(\r\022\023\n\013max_he" + "uests\030\001 \001(\r\022 \n\030total_number_of_requests\030" +
"ap_MB\030\004 \001(\r\022!\n\014region_loads\030\005 \003(\0132\013.Regi" + "\002 \001(\r\022\024\n\014used_heap_MB\030\003 \001(\r\022\023\n\013max_heap_" +
"onLoad\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocess" + "MB\030\004 \001(\r\022!\n\014region_loads\030\005 \003(\0132\013.RegionL" +
"or\022\031\n\021report_start_time\030\007 \001(\004\022\027\n\017report_", "oad\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocessor\022",
"end_time\030\010 \001(\004\022\030\n\020info_server_port\030\t \001(\r" + "\031\n\021report_start_time\030\007 \001(\004\022\027\n\017report_end" +
"\"O\n\016LiveServerInfo\022\033\n\006server\030\001 \002(\0132\013.Ser" + "_time\030\010 \001(\004\022\030\n\020info_server_port\030\t \001(\r\"O\n" +
"verName\022 \n\013server_load\030\002 \002(\0132\013.ServerLoa" + "\016LiveServerInfo\022\033\n\006server\030\001 \002(\0132\013.Server" +
"d\"\340\002\n\rClusterStatus\022/\n\rhbase_version\030\001 \001" + "Name\022 \n\013server_load\030\002 \002(\0132\013.ServerLoad\"\340" +
"(\0132\030.HBaseVersionFileContent\022%\n\014live_ser" + "\002\n\rClusterStatus\022/\n\rhbase_version\030\001 \001(\0132" +
"vers\030\002 \003(\0132\017.LiveServerInfo\022!\n\014dead_serv" + "\030.HBaseVersionFileContent\022%\n\014live_server" +
"ers\030\003 \003(\0132\013.ServerName\0222\n\025regions_in_tra" + "s\030\002 \003(\0132\017.LiveServerInfo\022!\n\014dead_servers" +
"nsition\030\004 \003(\0132\023.RegionInTransition\022\036\n\ncl" + "\030\003 \003(\0132\013.ServerName\0222\n\025regions_in_transi" +
"uster_id\030\005 \001(\0132\n.ClusterId\022)\n\023master_cop" + "tion\030\004 \003(\0132\023.RegionInTransition\022\036\n\nclust" +
"rocessors\030\006 \003(\0132\014.Coprocessor\022\033\n\006master\030", "er_id\030\005 \001(\0132\n.ClusterId\022)\n\023master_coproc",
"\007 \001(\0132\013.ServerName\022#\n\016backup_masters\030\010 \003" + "essors\030\006 \003(\0132\014.Coprocessor\022\033\n\006master\030\007 \001" +
"(\0132\013.ServerName\022\023\n\013balancer_on\030\t \001(\010BF\n*" + "(\0132\013.ServerName\022#\n\016backup_masters\030\010 \003(\0132" +
"org.apache.hadoop.hbase.protobuf.generat" + "\013.ServerName\022\023\n\013balancer_on\030\t \001(\010BF\n*org" +
"edB\023ClusterStatusProtosH\001\240\001\001" ".apache.hadoop.hbase.protobuf.generatedB" +
"\023ClusterStatusProtosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -10527,7 +10618,7 @@ public final class ClusterStatusProtos {
internal_static_RegionLoad_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionLoad_descriptor,
new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", }); new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", "LastMajorCompactionTs", });
internal_static_ServerLoad_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_ServerLoad_fieldAccessorTable = new

View File

@@ -113,6 +113,8 @@ message RegionLoad {
/** The current data locality for region in the regionserver */
optional float data_locality = 16;
optional uint64 last_major_compaction_ts = 17 [default = 0];
}
/* Server-level protobufs */ /* Server-level protobufs */

View File

@@ -387,6 +387,18 @@ message SetQuotaRequest {
message SetQuotaResponse {
}
message MajorCompactionTimestampRequest {
required TableName table_name = 1;
}
message MajorCompactionTimestampForRegionRequest {
required RegionSpecifier region = 1;
}
message MajorCompactionTimestampResponse {
required int64 compaction_timestamp = 1;
}
service MasterService {
/** Used by the client to get the number of regions that have received the updated schema */
rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest)
@@ -601,4 +613,12 @@ service MasterService {
/** Apply the new quota settings */
rpc SetQuota(SetQuotaRequest) returns(SetQuotaResponse);
/** Returns the timestamp of the last major compaction */
rpc getLastMajorCompactionTimestamp(MajorCompactionTimestampRequest)
returns(MajorCompactionTimestampResponse);
/** Returns the timestamp of the last major compaction */
rpc getLastMajorCompactionTimestampForRegion(MajorCompactionTimestampForRegionRequest)
returns(MajorCompactionTimestampResponse);
}

View File

@@ -148,6 +148,9 @@ public abstract class AbstractHFileWriter implements HFile.Writer {
int avgValueLen =
entryCount == 0 ? 0 : (int) (totalValueLength / entryCount);
fileInfo.append(FileInfo.AVG_VALUE_LEN, Bytes.toBytes(avgValueLen), false);
fileInfo.append(FileInfo.CREATE_TIME_TS, Bytes.toBytes(hFileContext.getFileCreateTime()),
false);
}
/**

View File

@@ -541,6 +541,7 @@ public class HFile {
static final byte [] LASTKEY = Bytes.toBytes(RESERVED_PREFIX + "LASTKEY");
static final byte [] AVG_KEY_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_KEY_LEN");
static final byte [] AVG_VALUE_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_VALUE_LEN");
static final byte [] CREATE_TIME_TS = Bytes.toBytes(RESERVED_PREFIX + "CREATE_TIME_TS");
static final byte [] COMPARATOR = Bytes.toBytes(RESERVED_PREFIX + "COMPARATOR");
static final byte [] TAGS_COMPRESSED = Bytes.toBytes(RESERVED_PREFIX + "TAGS_COMPRESSED");
public static final byte [] MAX_TAGS_LEN = Bytes.toBytes(RESERVED_PREFIX + "MAX_TAGS_LEN");

View File

@@ -157,6 +157,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
// File info
fileInfo = new FileInfo();
fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
this.hfileContext.setFileCreateTime(Bytes.toLong(fileInfo.get(FileInfo.CREATE_TIME_TS)));
lastKey = fileInfo.get(FileInfo.LASTKEY);
avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN));
avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN));

View File

@@ -2290,4 +2290,14 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
}
}
@Override
public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
return getClusterStatus().getLastMajorCompactionTsForTable(table);
}
@Override
public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
return getClusterStatus().getLastMajorCompactionTsForRegion(regionName);
}
}

View File

@@ -47,14 +47,11 @@ import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.*;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
@@ -111,6 +108,9 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescript
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse;
@@ -1275,4 +1275,35 @@ public class MasterRpcServices extends RSRpcServices
throw new ServiceException(e);
}
}
@Override
public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(RpcController controller,
MajorCompactionTimestampRequest request) throws ServiceException {
MajorCompactionTimestampResponse.Builder response =
MajorCompactionTimestampResponse.newBuilder();
try {
master.checkInitialized();
response.setCompactionTimestamp(master.getLastMajorCompactionTimestamp(ProtobufUtil
.toTableName(request.getTableName())));
} catch (IOException e) {
throw new ServiceException(e);
}
return response.build();
}
@Override
public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
RpcController controller, MajorCompactionTimestampForRegionRequest request)
throws ServiceException {
MajorCompactionTimestampResponse.Builder response =
MajorCompactionTimestampResponse.newBuilder();
try {
master.checkInitialized();
response.setCompactionTimestamp(master.getLastMajorCompactionTimestampForRegion(request
.getRegion().getValue().toByteArray()));
} catch (IOException e) {
throw new ServiceException(e);
}
return response.build();
}
}

View File

@@ -261,4 +261,20 @@ public interface MasterServices extends Server {
* @throws IOException
*/
public List<TableName> listTableNamesByNamespace(String name) throws IOException;
/**
* @param table
* @return the timestamp of the last successful major compaction for the passed table,
* or 0 if no HFile resulting from a major compaction exists
* @throws IOException
*/
public long getLastMajorCompactionTimestamp(TableName table) throws IOException;
/**
* @param regionName
* @return the timestamp of the last successful major compaction for the passed region
* or 0 if no HFile resulting from a major compaction exists
* @throws IOException
*/
public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException;
}

View File

@@ -117,6 +117,7 @@ import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
import org.apache.hadoop.hbase.ipc.RpcCallContext;
import org.apache.hadoop.hbase.ipc.RpcServer;
@@ -1494,6 +1495,28 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
return Collections.min(lastStoreFlushTimeMap.values());
}
/**
* This can be used to determine the last time all files of this region were major-compacted.
* @param majorCompactionOnly Only consider HFiles that are the result of a major compaction
* @return the timestamp of the oldest HFile for all stores of this region
*/
public long getOldestHfileTs(boolean majorCompactionOnly) throws IOException {
long result = Long.MAX_VALUE;
for (Store store : getStores().values()) {
for (StoreFile file : store.getStorefiles()) {
HFile.Reader reader = file.getReader().getHFileReader();
if (majorCompactionOnly) {
byte[] val = reader.loadFileInfo().get(StoreFile.MAJOR_COMPACTION_KEY);
if (val == null || !Bytes.toBoolean(val)) {
continue;
}
}
result = Math.min(result, reader.getFileContext().getFileCreateTime());
}
}
return result == Long.MAX_VALUE ? 0 : result;
}
//////////////////////////////////////////////////////////////////////////////
// HRegion maintenance.
//

View File

@@ -1341,7 +1341,7 @@ public class HRegionServer extends HasThread implements
* @throws IOException
*/
private RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr,
RegionSpecifier.Builder regionSpecifier) { RegionSpecifier.Builder regionSpecifier) throws IOException {
byte[] name = r.getRegionName();
int stores = 0;
int storefiles = 0;
@@ -1403,8 +1403,8 @@ public class HRegionServer extends HasThread implements
.setTotalCompactingKVs(totalCompactingKVs)
.setCurrentCompactedKVs(currentCompactedKVs)
.setCompleteSequenceId(r.maxFlushedSeqId)
.setDataLocality(dataLocality); .setDataLocality(dataLocality)
.setLastMajorCompactionTs(r.getOldestHfileTs(true));
return regionLoadBldr.build();
}
@@ -1412,7 +1412,7 @@ public class HRegionServer extends HasThread implements
* @param encodedRegionName
* @return An instance of RegionLoad.
*/
public RegionLoad createRegionLoad(final String encodedRegionName) { public RegionLoad createRegionLoad(final String encodedRegionName) throws IOException {
HRegion r = null;
r = this.onlineRegions.get(encodedRegionName);
return r != null ? createRegionLoad(r, null, null) : null;

View File

@@ -991,6 +991,7 @@ public class HStore implements Store {
.withHBaseCheckSum(true)
.withDataBlockEncoding(family.getDataBlockEncoding())
.withEncryptionContext(cryptoContext)
.withCreateTime(EnvironmentEdgeManager.currentTime())
.build();
return hFileContext;
}

View File

@@ -408,6 +408,58 @@ public class TestAdmin1 {
table.close();
}
@Test (timeout=300000)
public void testCompactionTimestamps() throws Exception {
HColumnDescriptor fam1 = new HColumnDescriptor("fam1");
TableName tableName = TableName.valueOf("testCompactionTimestampsTable");
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.addFamily(fam1);
this.admin.createTable(htd);
HTable table = (HTable)TEST_UTIL.getConnection().getTable(htd.getTableName());
long ts = this.admin.getLastMajorCompactionTimestamp(tableName);
assertEquals(0, ts);
Put p = new Put(Bytes.toBytes("row1"));
p.add(Bytes.toBytes("fam1"), Bytes.toBytes("fam1"), Bytes.toBytes("fam1"));
table.put(p);
ts = this.admin.getLastMajorCompactionTimestamp(tableName);
// no files written -> no data
assertEquals(0, ts);
this.admin.flush(tableName);
ts = this.admin.getLastMajorCompactionTimestamp(tableName);
// still 0, we flushed a file, but no major compaction happened
assertEquals(0, ts);
byte[] regionName =
table.getRegionLocator().getAllRegionLocations().get(0).getRegionInfo().getRegionName();
long ts1 = this.admin.getLastMajorCompactionTimestampForRegion(regionName);
assertEquals(ts, ts1);
p = new Put(Bytes.toBytes("row2"));
p.add(Bytes.toBytes("fam1"), Bytes.toBytes("fam1"), Bytes.toBytes("fam1"));
table.put(p);
this.admin.flush(tableName);
ts = this.admin.getLastMajorCompactionTimestamp(tableName);
// make sure the region API returns the same value, as the old file is still around
assertEquals(ts1, ts);
TEST_UTIL.compact(tableName, true);
table.put(p);
// forces a wait for the compaction
this.admin.flush(tableName);
ts = this.admin.getLastMajorCompactionTimestamp(tableName);
// after a compaction our earliest timestamp will have progressed forward
assertTrue(ts > ts1);
// region api still the same
ts1 = this.admin.getLastMajorCompactionTimestampForRegion(regionName);
assertEquals(ts, ts1);
table.put(p);
this.admin.flush(tableName);
ts = this.admin.getLastMajorCompactionTimestamp(tableName);
assertEquals(ts, ts1);
table.close();
}
@Test (timeout=300000)
public void testHColumnValidName() {
boolean exceptionThrown;

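HBASE-12859 exists to let clients track major compaction completion, which is the pattern the test above exercises. A hedged sketch of that client-side pattern (illustrative only; the class name, polling interval, and timeout handling are assumptions): remember when the compaction was requested, then poll the new Admin API until the reported timestamp moves past that point.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class WaitForMajorCompactionSketch {
  // Polls until the oldest major-compacted HFile reported for the table is newer than the
  // request time. Note: compares the client clock against server-stamped HFile create times,
  // so it assumes reasonably synchronized clocks.
  static void waitForMajorCompaction(Admin admin, TableName tn, long timeoutMs) throws Exception {
    long requestTime = System.currentTimeMillis();
    long deadline = requestTime + timeoutMs;
    admin.majorCompact(tn); // pre-existing asynchronous request
    while (admin.getLastMajorCompactionTimestamp(tn) < requestTime) {
      if (System.currentTimeMillis() > deadline) {
        throw new RuntimeException("major compaction did not finish within " + timeoutMs + " ms");
      }
      Thread.sleep(1000); // arbitrary polling interval
    }
  }
}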
View File

@@ -458,6 +458,18 @@ public class TestCatalogJanitor {
// Auto-generated method stub
return false;
}
@Override
public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
// Auto-generated method stub
return 0;
}
@Override
public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
// Auto-generated method stub
return 0;
}
}
@Test