HBASE-12859 New master API to track major compaction completion.

This commit is contained in:
Lars Hofhansl 2015-01-29 13:57:24 -08:00
parent 8ddc90c209
commit 0b3502f2b1
22 changed files with 2268 additions and 109 deletions

View File

@ -267,6 +267,29 @@ public class ClusterStatus extends VersionedWritable {
return masterCoprocessors;
}
/**
 * Computes the last major compaction timestamp for a whole table, taken as the
 * minimum of the per-region last-major-compaction timestamps reported by every
 * live server.
 * @param table the table to examine
 * @return the oldest reported timestamp, or 0 if no region load data was found
 */
public long getLastMajorCompactionTsForTable(TableName table) {
  long oldest = Long.MAX_VALUE;
  for (ServerName server : getServers()) {
    for (RegionLoad regionLoad : getLoad(server).getRegionsLoad().values()) {
      // Only regions belonging to the requested table contribute.
      if (!table.equals(HRegionInfo.getTable(regionLoad.getName()))) {
        continue;
      }
      oldest = Math.min(oldest, regionLoad.getLastMajorCompactionTs());
    }
  }
  return oldest == Long.MAX_VALUE ? 0 : oldest;
}
/**
 * Returns the last major compaction timestamp reported for the given region,
 * or 0 if no live server currently reports load for that region.
 * @param region the full region name
 */
public long getLastMajorCompactionTsForRegion(final byte[] region) {
  for (ServerName server : getServers()) {
    ServerLoad load = getLoad(server);
    // NOTE(review): map lookup keyed by byte[] — this only works if
    // getRegionsLoad() returns a map with a byte[]-aware comparator
    // (e.g. TreeMap over Bytes.BYTES_COMPARATOR); a plain HashMap would
    // never match on array identity. TODO confirm against ServerLoad.
    RegionLoad rl = load.getRegionsLoad().get(region);
    if (rl != null) {
      return rl.getLastMajorCompactionTs();
    }
  }
  return 0;
}
public boolean isBalancerOn() {
return balancerOn != null && balancerOn;

View File

@ -169,6 +169,14 @@ public class RegionLoad {
}
return 0.0f;
}
/**
 * @return the timestamp of the oldest hfile for any store of this region,
 *   as carried in the {@code last_major_compaction_ts} protobuf field
 *   (0 when the server did not report one).
 */
public long getLastMajorCompactionTs() {
  return regionLoadPB.getLastMajorCompactionTs();
}
/**
* @see java.lang.Object#toString()
*/
@ -179,7 +187,9 @@ public class RegionLoad {
sb = Strings.appendKeyValue(sb, "numberOfStorefiles",
this.getStorefiles());
sb = Strings.appendKeyValue(sb, "storefileUncompressedSizeMB",
this.getStoreUncompressedSizeMB());
this.getStoreUncompressedSizeMB());
sb = Strings.appendKeyValue(sb, "lastMajorCompactionTimestamp",
this.getLastMajorCompactionTs());
sb = Strings.appendKeyValue(sb, "storefileSizeMB",
this.getStorefileSizeMB());
if (this.getStoreUncompressedSizeMB() != 0) {

View File

@ -953,6 +953,32 @@ public interface Admin extends Abortable, Closeable {
AdminProtos.GetRegionInfoResponse.CompactionState getCompactionStateForRegion(
final byte[] regionName) throws IOException;
/**
* Get the timestamp of the last major compaction for the passed table
*
* The timestamp of the oldest HFile resulting from a major compaction of that table,
* or 0 if no such HFile could be found.
*
* @param tableName table to examine
* @return the last major compaction timestamp or 0
* @throws IOException if a remote or network exception occurs
*/
long getLastMajorCompactionTimestamp(final TableName tableName)
throws IOException;
/**
* Get the timestamp of the last major compaction for the passed region.
*
* The timestamp of the oldest HFile resulting from a major compaction of that region,
* or 0 if no such HFile could be found.
*
* @param regionName region to examine
* @return the last major compaction timestamp or 0
* @throws IOException if a remote or network exception occurs
*/
long getLastMajorCompactionTimestampForRegion(final byte[] regionName)
throws IOException;
/**
* Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
* taken. If the table is disabled, an offline snapshot is taken. Snapshots are considered unique

View File

@ -134,6 +134,9 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescript
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse;
@ -2006,6 +2009,20 @@ class ConnectionManager {
throws ServiceException {
return stub.getClusterStatus(controller, request);
}
// Plain pass-through to the master RPC stub, like the other methods of this
// anonymous MasterKeepAliveConnection: no retry or translation logic here.
@Override
public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(
    RpcController controller, MajorCompactionTimestampRequest request)
    throws ServiceException {
  return stub.getLastMajorCompactionTimestamp(controller, request);
}

// Pass-through for the per-region variant of the same master RPC.
@Override
public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
    RpcController controller, MajorCompactionTimestampForRegionRequest request)
    throws ServiceException {
  return stub.getLastMajorCompactionTimestampForRegion(controller, request);
}
};
}

View File

@ -83,6 +83,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfiguratio
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
@ -115,6 +116,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRes
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
@ -3721,4 +3724,33 @@ public class HBaseAdmin implements Admin {
throw new IOException("Failed to get master info port from MasterAddressTracker", e);
}
}
/**
 * Asks the master for the last major compaction timestamp of the given table.
 * Runs through {@link #executeCallable} so the usual master-callable retry
 * handling applies.
 */
@Override
public long getLastMajorCompactionTimestamp(final TableName tableName) throws IOException {
  return executeCallable(new MasterCallable<Long>(getConnection()) {
    @Override
    public Long call(int callTimeout) throws ServiceException {
      MajorCompactionTimestampRequest.Builder builder =
          MajorCompactionTimestampRequest.newBuilder();
      builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
      return master.getLastMajorCompactionTimestamp(null, builder.build())
          .getCompactionTimestamp();
    }
  });
}
/**
 * Asks the master for the last major compaction timestamp of a single region,
 * identified by its full region name.
 */
@Override
public long getLastMajorCompactionTimestampForRegion(final byte[] regionName) throws IOException {
  return executeCallable(new MasterCallable<Long>(getConnection()) {
    @Override
    public Long call(int callTimeout) throws ServiceException {
      MajorCompactionTimestampForRegionRequest.Builder builder =
          MajorCompactionTimestampForRegionRequest.newBuilder();
      builder.setRegion(
          RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName));
      return master.getLastMajorCompactionTimestampForRegion(null, builder.build())
          .getCompactionTimestamp();
    }
  });
}
}

View File

@ -56,6 +56,7 @@ public class HFileContext implements HeapSize, Cloneable {
private DataBlockEncoding encoding = DataBlockEncoding.NONE;
/** Encryption algorithm and key used */
private Encryption.Context cryptoContext = Encryption.Context.NONE;
private long fileCreateTime;
//Empty constructor. Go with setters
public HFileContext() {
@ -76,12 +77,13 @@ public class HFileContext implements HeapSize, Cloneable {
this.blocksize = context.blocksize;
this.encoding = context.encoding;
this.cryptoContext = context.cryptoContext;
this.fileCreateTime = context.fileCreateTime;
}
public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
Compression.Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
int bytesPerChecksum, int blockSize, DataBlockEncoding encoding,
Encryption.Context cryptoContext) {
Encryption.Context cryptoContext, long fileCreateTime) {
this.usesHBaseChecksum = useHBaseChecksum;
this.includesMvcc = includesMvcc;
this.includesTags = includesTags;
@ -94,6 +96,7 @@ public class HFileContext implements HeapSize, Cloneable {
this.encoding = encoding;
}
this.cryptoContext = cryptoContext;
this.fileCreateTime = fileCreateTime;
}
/**
@ -141,6 +144,10 @@ public class HFileContext implements HeapSize, Cloneable {
this.includesTags = includesTags;
}
/** Sets the creation time recorded for the HFile this context describes. */
public void setFileCreateTime(long fileCreateTime) {
  this.fileCreateTime = fileCreateTime;
}
public boolean isCompressTags() {
return compressTags;
}
@ -161,6 +168,10 @@ public class HFileContext implements HeapSize, Cloneable {
return blocksize;
}
/** @return the creation time recorded for the HFile this context describes */
public long getFileCreateTime() {
  return fileCreateTime;
}
public DataBlockEncoding getDataBlockEncoding() {
return encoding;
}
@ -189,7 +200,8 @@ public class HFileContext implements HeapSize, Cloneable {
4 * ClassSize.REFERENCE +
2 * Bytes.SIZEOF_INT +
// usesHBaseChecksum, includesMvcc, includesTags and compressTags
4 * Bytes.SIZEOF_BOOLEAN);
4 * Bytes.SIZEOF_BOOLEAN +
Bytes.SIZEOF_LONG);
return size;
}

View File

@ -52,6 +52,7 @@ public class HFileContextBuilder {
private DataBlockEncoding encoding = DataBlockEncoding.NONE;
/** Crypto context */
private Encryption.Context cryptoContext = Encryption.Context.NONE;
private long fileCreateTime = 0;
public HFileContextBuilder withHBaseCheckSum(boolean useHBaseCheckSum) {
this.usesHBaseChecksum = useHBaseCheckSum;
@ -103,8 +104,14 @@ public class HFileContextBuilder {
return this;
}
/**
 * Sets the file creation time to record in the built context.
 * @return this builder, for chaining
 */
public HFileContextBuilder withCreateTime(long fileCreateTime) {
  this.fileCreateTime = fileCreateTime;
  return this;
}
/** Builds an {@link HFileContext} from the values accumulated in this builder. */
public HFileContext build() {
  return new HFileContext(usesHBaseChecksum, includesMvcc, includesTags, compression,
      compressTags, checksumType, bytesPerChecksum, blocksize, encoding, cryptoContext,
      fileCreateTime);
}
}

View File

@ -2171,6 +2171,16 @@ public final class ClusterStatusProtos {
* </pre>
*/
float getDataLocality();
// optional uint64 last_major_compaction_ts = 17 [default = 0];
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
boolean hasLastMajorCompactionTs();
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
long getLastMajorCompactionTs();
}
/**
* Protobuf type {@code RegionLoad}
@ -2311,6 +2321,11 @@ public final class ClusterStatusProtos {
dataLocality_ = input.readFloat();
break;
}
case 136: {
bitField0_ |= 0x00010000;
lastMajorCompactionTs_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@ -2753,6 +2768,22 @@ public final class ClusterStatusProtos {
return dataLocality_;
}
// optional uint64 last_major_compaction_ts = 17 [default = 0];
public static final int LAST_MAJOR_COMPACTION_TS_FIELD_NUMBER = 17;
private long lastMajorCompactionTs_;
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
public boolean hasLastMajorCompactionTs() {
return ((bitField0_ & 0x00010000) == 0x00010000);
}
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
public long getLastMajorCompactionTs() {
return lastMajorCompactionTs_;
}
private void initFields() {
regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
stores_ = 0;
@ -2770,6 +2801,7 @@ public final class ClusterStatusProtos {
totalStaticBloomSizeKB_ = 0;
completeSequenceId_ = 0L;
dataLocality_ = 0F;
lastMajorCompactionTs_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@ -2839,6 +2871,9 @@ public final class ClusterStatusProtos {
if (((bitField0_ & 0x00008000) == 0x00008000)) {
output.writeFloat(16, dataLocality_);
}
if (((bitField0_ & 0x00010000) == 0x00010000)) {
output.writeUInt64(17, lastMajorCompactionTs_);
}
getUnknownFields().writeTo(output);
}
@ -2912,6 +2947,10 @@ public final class ClusterStatusProtos {
size += com.google.protobuf.CodedOutputStream
.computeFloatSize(16, dataLocality_);
}
if (((bitField0_ & 0x00010000) == 0x00010000)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(17, lastMajorCompactionTs_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@ -3014,6 +3053,11 @@ public final class ClusterStatusProtos {
if (hasDataLocality()) {
result = result && (Float.floatToIntBits(getDataLocality()) == Float.floatToIntBits(other.getDataLocality()));
}
result = result && (hasLastMajorCompactionTs() == other.hasLastMajorCompactionTs());
if (hasLastMajorCompactionTs()) {
result = result && (getLastMajorCompactionTs()
== other.getLastMajorCompactionTs());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@ -3092,6 +3136,10 @@ public final class ClusterStatusProtos {
hash = (53 * hash) + Float.floatToIntBits(
getDataLocality());
}
if (hasLastMajorCompactionTs()) {
hash = (37 * hash) + LAST_MAJOR_COMPACTION_TS_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLastMajorCompactionTs());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@ -3238,6 +3286,8 @@ public final class ClusterStatusProtos {
bitField0_ = (bitField0_ & ~0x00004000);
dataLocality_ = 0F;
bitField0_ = (bitField0_ & ~0x00008000);
lastMajorCompactionTs_ = 0L;
bitField0_ = (bitField0_ & ~0x00010000);
return this;
}
@ -3334,6 +3384,10 @@ public final class ClusterStatusProtos {
to_bitField0_ |= 0x00008000;
}
result.dataLocality_ = dataLocality_;
if (((from_bitField0_ & 0x00010000) == 0x00010000)) {
to_bitField0_ |= 0x00010000;
}
result.lastMajorCompactionTs_ = lastMajorCompactionTs_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@ -3398,6 +3452,9 @@ public final class ClusterStatusProtos {
if (other.hasDataLocality()) {
setDataLocality(other.getDataLocality());
}
if (other.hasLastMajorCompactionTs()) {
setLastMajorCompactionTs(other.getLastMajorCompactionTs());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@ -4337,6 +4394,39 @@ public final class ClusterStatusProtos {
return this;
}
// optional uint64 last_major_compaction_ts = 17 [default = 0];
private long lastMajorCompactionTs_ ;
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
public boolean hasLastMajorCompactionTs() {
return ((bitField0_ & 0x00010000) == 0x00010000);
}
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
public long getLastMajorCompactionTs() {
return lastMajorCompactionTs_;
}
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
public Builder setLastMajorCompactionTs(long value) {
bitField0_ |= 0x00010000;
lastMajorCompactionTs_ = value;
onChanged();
return this;
}
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
public Builder clearLastMajorCompactionTs() {
bitField0_ = (bitField0_ & ~0x00010000);
lastMajorCompactionTs_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:RegionLoad)
}
@ -10472,7 +10562,7 @@ public final class ClusterStatusProtos {
"PLITTING_NEW\020\r\022\017\n\013MERGING_NEW\020\016\"X\n\022Regio",
"nInTransition\022\036\n\004spec\030\001 \002(\0132\020.RegionSpec" +
"ifier\022\"\n\014region_state\030\002 \002(\0132\014.RegionStat" +
"e\"\347\003\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" +
"e\"\214\004\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" +
"(\0132\020.RegionSpecifier\022\016\n\006stores\030\002 \001(\r\022\022\n\n" +
"storefiles\030\003 \001(\r\022\"\n\032store_uncompressed_s" +
"ize_MB\030\004 \001(\r\022\031\n\021storefile_size_MB\030\005 \001(\r\022" +
@ -10484,26 +10574,27 @@ public final class ClusterStatusProtos {
"\030\014 \001(\r\022\"\n\032total_static_index_size_KB\030\r \001" +
"(\r\022\"\n\032total_static_bloom_size_KB\030\016 \001(\r\022\034" +
"\n\024complete_sequence_id\030\017 \001(\004\022\025\n\rdata_loc" +
"ality\030\020 \001(\002\"\212\002\n\nServerLoad\022\032\n\022number_of_" +
"requests\030\001 \001(\r\022 \n\030total_number_of_reques" +
"ts\030\002 \001(\r\022\024\n\014used_heap_MB\030\003 \001(\r\022\023\n\013max_he" +
"ap_MB\030\004 \001(\r\022!\n\014region_loads\030\005 \003(\0132\013.Regi" +
"onLoad\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocess" +
"or\022\031\n\021report_start_time\030\007 \001(\004\022\027\n\017report_",
"end_time\030\010 \001(\004\022\030\n\020info_server_port\030\t \001(\r" +
"\"O\n\016LiveServerInfo\022\033\n\006server\030\001 \002(\0132\013.Ser" +
"verName\022 \n\013server_load\030\002 \002(\0132\013.ServerLoa" +
"d\"\340\002\n\rClusterStatus\022/\n\rhbase_version\030\001 \001" +
"(\0132\030.HBaseVersionFileContent\022%\n\014live_ser" +
"vers\030\002 \003(\0132\017.LiveServerInfo\022!\n\014dead_serv" +
"ers\030\003 \003(\0132\013.ServerName\0222\n\025regions_in_tra" +
"nsition\030\004 \003(\0132\023.RegionInTransition\022\036\n\ncl" +
"uster_id\030\005 \001(\0132\n.ClusterId\022)\n\023master_cop" +
"rocessors\030\006 \003(\0132\014.Coprocessor\022\033\n\006master\030",
"\007 \001(\0132\013.ServerName\022#\n\016backup_masters\030\010 \003" +
"(\0132\013.ServerName\022\023\n\013balancer_on\030\t \001(\010BF\n*" +
"org.apache.hadoop.hbase.protobuf.generat" +
"edB\023ClusterStatusProtosH\001\240\001\001"
"ality\030\020 \001(\002\022#\n\030last_major_compaction_ts\030" +
"\021 \001(\004:\0010\"\212\002\n\nServerLoad\022\032\n\022number_of_req" +
"uests\030\001 \001(\r\022 \n\030total_number_of_requests\030" +
"\002 \001(\r\022\024\n\014used_heap_MB\030\003 \001(\r\022\023\n\013max_heap_" +
"MB\030\004 \001(\r\022!\n\014region_loads\030\005 \003(\0132\013.RegionL" +
"oad\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocessor\022",
"\031\n\021report_start_time\030\007 \001(\004\022\027\n\017report_end" +
"_time\030\010 \001(\004\022\030\n\020info_server_port\030\t \001(\r\"O\n" +
"\016LiveServerInfo\022\033\n\006server\030\001 \002(\0132\013.Server" +
"Name\022 \n\013server_load\030\002 \002(\0132\013.ServerLoad\"\340" +
"\002\n\rClusterStatus\022/\n\rhbase_version\030\001 \001(\0132" +
"\030.HBaseVersionFileContent\022%\n\014live_server" +
"s\030\002 \003(\0132\017.LiveServerInfo\022!\n\014dead_servers" +
"\030\003 \003(\0132\013.ServerName\0222\n\025regions_in_transi" +
"tion\030\004 \003(\0132\023.RegionInTransition\022\036\n\nclust" +
"er_id\030\005 \001(\0132\n.ClusterId\022)\n\023master_coproc",
"essors\030\006 \003(\0132\014.Coprocessor\022\033\n\006master\030\007 \001" +
"(\0132\013.ServerName\022#\n\016backup_masters\030\010 \003(\0132" +
"\013.ServerName\022\023\n\013balancer_on\030\t \001(\010BF\n*org" +
".apache.hadoop.hbase.protobuf.generatedB" +
"\023ClusterStatusProtosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@ -10527,7 +10618,7 @@ public final class ClusterStatusProtos {
internal_static_RegionLoad_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionLoad_descriptor,
new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", });
new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", "LastMajorCompactionTs", });
internal_static_ServerLoad_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_ServerLoad_fieldAccessorTable = new

View File

@ -113,6 +113,8 @@ message RegionLoad {
/** The current data locality for region in the regionserver */
optional float data_locality = 16;
optional uint64 last_major_compaction_ts = 17 [default = 0];
}
/* Server-level protobufs */

View File

@ -364,6 +364,18 @@ message IsProcedureDoneResponse {
optional ProcedureDescription snapshot = 2;
}
/** Asks the master for the last major compaction timestamp of a whole table. */
message MajorCompactionTimestampRequest {
  required TableName table_name = 1;
}

/** Asks the master for the last major compaction timestamp of a single region. */
message MajorCompactionTimestampForRegionRequest {
  required RegionSpecifier region = 1;
}

/** The timestamp answer; the master returns 0 when no qualifying HFile exists. */
message MajorCompactionTimestampResponse {
  required int64 compaction_timestamp = 1;
}
service MasterService {
/** Used by the client to get the number of regions that have received the updated schema */
rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest)
@ -571,4 +583,12 @@ service MasterService {
/** returns a list of tables for a given namespace*/
rpc ListTableNamesByNamespace(ListTableNamesByNamespaceRequest)
returns(ListTableNamesByNamespaceResponse);
/** Returns the timestamp of the last major compaction */
rpc getLastMajorCompactionTimestamp(MajorCompactionTimestampRequest)
returns(MajorCompactionTimestampResponse);
/** Returns the timestamp of the last major compaction */
rpc getLastMajorCompactionTimestampForRegion(MajorCompactionTimestampForRegionRequest)
returns(MajorCompactionTimestampResponse);
}

View File

@ -148,6 +148,9 @@ public abstract class AbstractHFileWriter implements HFile.Writer {
int avgValueLen =
entryCount == 0 ? 0 : (int) (totalValueLength / entryCount);
fileInfo.append(FileInfo.AVG_VALUE_LEN, Bytes.toBytes(avgValueLen), false);
fileInfo.append(FileInfo.CREATE_TIME_TS, Bytes.toBytes(hFileContext.getFileCreateTime()),
false);
}
/**

View File

@ -540,6 +540,7 @@ public class HFile {
static final byte [] LASTKEY = Bytes.toBytes(RESERVED_PREFIX + "LASTKEY");
static final byte [] AVG_KEY_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_KEY_LEN");
static final byte [] AVG_VALUE_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_VALUE_LEN");
static final byte [] CREATE_TIME_TS = Bytes.toBytes(RESERVED_PREFIX + "CREATE_TIME_TS");
static final byte [] COMPARATOR = Bytes.toBytes(RESERVED_PREFIX + "COMPARATOR");
static final byte [] TAGS_COMPRESSED = Bytes.toBytes(RESERVED_PREFIX + "TAGS_COMPRESSED");
public static final byte [] MAX_TAGS_LEN = Bytes.toBytes(RESERVED_PREFIX + "MAX_TAGS_LEN");

View File

@ -157,6 +157,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
// File info
fileInfo = new FileInfo();
fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
this.hfileContext.setFileCreateTime(Bytes.toLong(fileInfo.get(FileInfo.CREATE_TIME_TS)));
lastKey = fileInfo.get(FileInfo.LASTKEY);
avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN));
avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN));

View File

@ -2305,4 +2305,14 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
}
}
// Both lookups are answered from the cluster status the master already has:
// region loads reported by the region servers carry last_major_compaction_ts.
@Override
public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
  return getClusterStatus().getLastMajorCompactionTsForTable(table);
}

@Override
public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
  return getClusterStatus().getLastMajorCompactionTsForRegion(regionName);
}
}

View File

@ -106,6 +106,9 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescript
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse;
@ -1243,4 +1246,35 @@ public class MasterRpcServices extends RSRpcServices
throw new ServiceException(ioe);
}
}
/**
 * RPC endpoint: last major compaction timestamp for a table. IOExceptions from
 * the master (including "not initialized") are wrapped as ServiceException.
 */
@Override
public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(RpcController controller,
    MajorCompactionTimestampRequest request) throws ServiceException {
  long timestamp;
  try {
    master.checkInitialized();
    timestamp =
        master.getLastMajorCompactionTimestamp(ProtobufUtil.toTableName(request.getTableName()));
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return MajorCompactionTimestampResponse.newBuilder().setCompactionTimestamp(timestamp).build();
}
/**
 * RPC endpoint: last major compaction timestamp for a single region. Mirrors
 * {@code getLastMajorCompactionTimestamp} but keys off the region specifier.
 */
@Override
public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
    RpcController controller, MajorCompactionTimestampForRegionRequest request)
    throws ServiceException {
  long timestamp;
  try {
    master.checkInitialized();
    byte[] regionName = request.getRegion().getValue().toByteArray();
    timestamp = master.getLastMajorCompactionTimestampForRegion(regionName);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return MajorCompactionTimestampResponse.newBuilder().setCompactionTimestamp(timestamp).build();
}
}

View File

@ -250,4 +250,20 @@ public interface MasterServices extends Server {
* @throws IOException
*/
public List<TableName> listTableNamesByNamespace(String name) throws IOException;
/**
 * @param table the table to examine
 * @return the timestamp of the last successful major compaction for the passed table,
 *   or 0 if no HFile resulting from a major compaction exists
 * @throws IOException if the timestamp cannot be computed
 */
public long getLastMajorCompactionTimestamp(TableName table) throws IOException;

/**
 * @param regionName the full region name of the region to examine
 * @return the timestamp of the last successful major compaction for the passed region
 *   or 0 if no HFile resulting from a major compaction exists
 * @throws IOException if the timestamp cannot be computed
 */
public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException;
}

View File

@ -118,6 +118,7 @@ import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
import org.apache.hadoop.hbase.ipc.RpcCallContext;
import org.apache.hadoop.hbase.ipc.RpcServer;
@ -1492,6 +1493,28 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
return Collections.min(lastStoreFlushTimeMap.values());
}
/**
 * This can be used to determine the last time all files of this region were major compacted:
 * it returns the create time of the oldest HFile across all stores of this region.
 * @param majorCompactionOnly only consider HFiles that are the result of a major compaction
 * @return the timestamp of the oldest qualifying HFile for all stores of this region,
 *   or 0 if no qualifying HFile exists
 * @throws IOException if a store file's file info cannot be loaded
 */
public long getOldestHfileTs(boolean majorCompactionOnly) throws IOException {
  long result = Long.MAX_VALUE;
  for (Store store : getStores().values()) {
    for (StoreFile file : store.getStorefiles()) {
      HFile.Reader reader = file.getReader().getHFileReader();
      if (majorCompactionOnly) {
        // Skip files that were not flagged as produced by a major compaction.
        byte[] val = reader.loadFileInfo().get(StoreFile.MAJOR_COMPACTION_KEY);
        if (val == null || !Bytes.toBoolean(val)) {
          continue;
        }
      }
      result = Math.min(result, reader.getFileContext().getFileCreateTime());
    }
  }
  // No qualifying HFile found.
  return result == Long.MAX_VALUE ? 0 : result;
}
//////////////////////////////////////////////////////////////////////////////
// HRegion maintenance.
//

View File

@ -1328,7 +1328,7 @@ public class HRegionServer extends HasThread implements
* @throws IOException
*/
private RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr,
RegionSpecifier.Builder regionSpecifier) {
RegionSpecifier.Builder regionSpecifier) throws IOException {
byte[] name = r.getRegionName();
int stores = 0;
int storefiles = 0;
@ -1390,8 +1390,8 @@ public class HRegionServer extends HasThread implements
.setTotalCompactingKVs(totalCompactingKVs)
.setCurrentCompactedKVs(currentCompactedKVs)
.setCompleteSequenceId(r.maxFlushedSeqId)
.setDataLocality(dataLocality);
.setDataLocality(dataLocality)
.setLastMajorCompactionTs(r.getOldestHfileTs(true));
return regionLoadBldr.build();
}
@ -1399,7 +1399,7 @@ public class HRegionServer extends HasThread implements
* @param encodedRegionName
* @return An instance of RegionLoad.
*/
public RegionLoad createRegionLoad(final String encodedRegionName) {
public RegionLoad createRegionLoad(final String encodedRegionName) throws IOException {
HRegion r = null;
r = this.onlineRegions.get(encodedRegionName);
return r != null ? createRegionLoad(r, null, null) : null;

View File

@ -991,6 +991,7 @@ public class HStore implements Store {
.withHBaseCheckSum(true)
.withDataBlockEncoding(family.getDataBlockEncoding())
.withEncryptionContext(cryptoContext)
.withCreateTime(EnvironmentEdgeManager.currentTime())
.build();
return hFileContext;
}

View File

@ -411,6 +411,58 @@ public class TestAdmin1 {
table.close();
}
// End-to-end check of Admin.getLastMajorCompactionTimestamp[ForRegion]:
// 0 before any major compaction, then monotonically advancing after one.
// The steps below are order-sensitive (puts/flushes gate the compaction).
@Test (timeout=300000)
public void testCompactionTimestamps() throws Exception {
  HColumnDescriptor fam1 = new HColumnDescriptor("fam1");
  TableName tableName = TableName.valueOf("testCompactionTimestampsTable");
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(fam1);
  this.admin.createTable(htd);
  HTable table = (HTable)TEST_UTIL.getConnection().getTable(htd.getTableName());
  long ts = this.admin.getLastMajorCompactionTimestamp(tableName);
  // brand-new table: no HFiles at all yet
  assertEquals(0, ts);
  Put p = new Put(Bytes.toBytes("row1"));
  p.add(Bytes.toBytes("fam1"), Bytes.toBytes("fam1"), Bytes.toBytes("fam1"));
  table.put(p);
  ts = this.admin.getLastMajorCompactionTimestamp(tableName);
  // no files written -> no data
  assertEquals(0, ts);
  this.admin.flush(tableName);
  ts = this.admin.getLastMajorCompactionTimestamp(tableName);
  // still 0, we flushed a file, but no major compaction happened
  assertEquals(0, ts);
  byte[] regionName =
      table.getRegionLocator().getAllRegionLocations().get(0).getRegionInfo().getRegionName();
  long ts1 = this.admin.getLastMajorCompactionTimestampForRegion(regionName);
  // single-region table: the per-region API must agree with the per-table one
  assertEquals(ts, ts1);
  p = new Put(Bytes.toBytes("row2"));
  p.add(Bytes.toBytes("fam1"), Bytes.toBytes("fam1"), Bytes.toBytes("fam1"));
  table.put(p);
  this.admin.flush(tableName);
  ts = this.admin.getLastMajorCompactionTimestamp(tableName);
  // make sure the region API returns the same value, as the old file is still around
  assertEquals(ts1, ts);
  TEST_UTIL.compact(tableName, true);
  table.put(p);
  // forces a wait for the compaction
  this.admin.flush(tableName);
  ts = this.admin.getLastMajorCompactionTimestamp(tableName);
  // after a compaction our earliest timestamp will have progressed forward
  assertTrue(ts > ts1);
  // region api still the same
  ts1 = this.admin.getLastMajorCompactionTimestampForRegion(regionName);
  assertEquals(ts, ts1);
  table.put(p);
  this.admin.flush(tableName);
  ts = this.admin.getLastMajorCompactionTimestamp(tableName);
  // a plain flush (minor file) must not move the major-compaction timestamp
  assertEquals(ts, ts1);
  table.close();
}
@Test (timeout=300000)
public void testHColumnValidName() {
boolean exceptionThrown;

View File

@ -427,6 +427,18 @@ public class TestCatalogJanitor {
// Auto-generated method stub
return false;
}
// Stub implementations for the mock MasterServices used by this test;
// compaction timestamps are irrelevant to catalog-janitor behavior.
@Override
public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
  // Auto-generated method stub
  return 0;
}

@Override
public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
  // Auto-generated method stub
  return 0;
}
}
@Test