diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java index b93312a882e..2791a042992 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java @@ -268,6 +268,29 @@ public class ClusterStatus extends VersionedWritable { return masterCoprocessors; } + public long getLastMajorCompactionTsForTable(TableName table) { + long result = Long.MAX_VALUE; + for (ServerName server : getServers()) { + ServerLoad load = getLoad(server); + for (RegionLoad rl : load.getRegionsLoad().values()) { + if (table.equals(HRegionInfo.getTable(rl.getName()))) { + result = Math.min(result, rl.getLastMajorCompactionTs()); + } + } + } + return result == Long.MAX_VALUE ? 0 : result; + } + + public long getLastMajorCompactionTsForRegion(final byte[] region) { + for (ServerName server : getServers()) { + ServerLoad load = getLoad(server); + RegionLoad rl = load.getRegionsLoad().get(region); + if (rl != null) { + return rl.getLastMajorCompactionTs(); + } + } + return 0; + } public boolean isBalancerOn() { return balancerOn != null && balancerOn; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java index 234c5aed910..794e8b217fa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java @@ -169,6 +169,14 @@ public class RegionLoad { } return 0.0f; } + + /** + * @return the timestamp of the oldest hfile for any store of this region. + */ + public long getLastMajorCompactionTs() { + return regionLoadPB.getLastMajorCompactionTs(); + } + /** * @see java.lang.Object#toString() */ @@ -179,7 +187,9 @@ public class RegionLoad { sb = Strings.appendKeyValue(sb, "numberOfStorefiles", this.getStorefiles()); sb = Strings.appendKeyValue(sb, "storefileUncompressedSizeMB", - this.getStoreUncompressedSizeMB()); + this.getStoreUncompressedSizeMB()); + sb = Strings.appendKeyValue(sb, "lastMajorCompactionTimestamp", + this.getLastMajorCompactionTs()); sb = Strings.appendKeyValue(sb, "storefileSizeMB", this.getStorefileSizeMB()); if (this.getStoreUncompressedSizeMB() != 0) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index c5d95562a44..70ed23119b0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -957,6 +957,32 @@ public interface Admin extends Abortable, Closeable { AdminProtos.GetRegionInfoResponse.CompactionState getCompactionStateForRegion( final byte[] regionName) throws IOException; + /** + * Get the timestamp of the last major compaction for the passed table + * + * The timestamp of the oldest HFile resulting from a major compaction of that table, + * or 0 if no such HFile could be found. + * + * @param tableName table to examine + * @return the last major compaction timestamp or 0 + * @throws IOException if a remote or network exception occurs + */ + long getLastMajorCompactionTimestamp(final TableName tableName) + throws IOException; + + /** + * Get the timestamp of the last major compaction for the passed region. 
+ * + * The timestamp of the oldest HFile resulting from a major compaction of that region, + * or 0 if no such HFile could be found. + * + * @param regionName region to examine + * @return the last major compaction timestamp or 0 + * @throws IOException if a remote or network exception occurs + */ + long getLastMajorCompactionTimestampForRegion(final byte[] regionName) + throws IOException; + /** * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be * taken. If the table is disabled, an offline snapshot is taken. Snapshots are considered unique diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java index bb1fe7bf528..c5ddb5421ee 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java @@ -136,6 +136,9 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescript import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse; @@ -2037,6 +2040,20 @@ final class ConnectionManager { throws ServiceException { return stub.setQuota(controller, request); } + + @Override + public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp( + RpcController controller, MajorCompactionTimestampRequest request) + throws ServiceException { + return stub.getLastMajorCompactionTimestamp(controller, request); + } + + @Override + public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion( + RpcController controller, MajorCompactionTimestampForRegionRequest request) + throws ServiceException { + return stub.getLastMajorCompactionTimestampForRegion(controller, request); + } }; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index ec0ee439afc..d14e369bb80 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -83,6 +83,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfiguratio import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema; import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest; @@ -115,6 +116,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRes import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest; @@ -3771,4 +3774,33 @@ public class HBaseAdmin implements Admin { throw new IOException("Failed to get master info port from MasterAddressTracker", e); } } + + @Override + public long getLastMajorCompactionTimestamp(final TableName tableName) throws IOException { + return executeCallable(new MasterCallable<Long>(getConnection()) { + @Override + public Long call(int callTimeout) throws ServiceException { + MajorCompactionTimestampRequest req = + MajorCompactionTimestampRequest.newBuilder() + .setTableName(ProtobufUtil.toProtoTableName(tableName)).build(); + return master.getLastMajorCompactionTimestamp(null, req).getCompactionTimestamp(); + } + }); + } + + @Override + public long getLastMajorCompactionTimestampForRegion(final byte[] regionName) throws IOException { + return executeCallable(new MasterCallable<Long>(getConnection()) { + @Override + public Long call(int callTimeout) throws ServiceException { + MajorCompactionTimestampForRegionRequest req = + MajorCompactionTimestampForRegionRequest + .newBuilder() + .setRegion( + RequestConverter + .buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)).build(); + return master.getLastMajorCompactionTimestampForRegion(null, req).getCompactionTimestamp(); + } + }); + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java index ce8b71a307b..5f43444a98b 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java @@ -56,6 +56,7 @@ public class HFileContext implements HeapSize, Cloneable { private DataBlockEncoding encoding = DataBlockEncoding.NONE; /** Encryption algorithm and key used */ private Encryption.Context cryptoContext = Encryption.Context.NONE; + private long fileCreateTime; //Empty constructor. 
Go with setters public HFileContext() { @@ -76,12 +77,13 @@ public class HFileContext implements HeapSize, Cloneable { this.blocksize = context.blocksize; this.encoding = context.encoding; this.cryptoContext = context.cryptoContext; + this.fileCreateTime = context.fileCreateTime; } public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags, Compression.Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType, int bytesPerChecksum, int blockSize, DataBlockEncoding encoding, - Encryption.Context cryptoContext) { + Encryption.Context cryptoContext, long fileCreateTime) { this.usesHBaseChecksum = useHBaseChecksum; this.includesMvcc = includesMvcc; this.includesTags = includesTags; @@ -94,6 +96,7 @@ public class HFileContext implements HeapSize, Cloneable { this.encoding = encoding; } this.cryptoContext = cryptoContext; + this.fileCreateTime = fileCreateTime; } /** @@ -141,6 +144,10 @@ public class HFileContext implements HeapSize, Cloneable { this.includesTags = includesTags; } + public void setFileCreateTime(long fileCreateTime) { + this.fileCreateTime = fileCreateTime; + } + public boolean isCompressTags() { return compressTags; } @@ -161,6 +168,10 @@ public class HFileContext implements HeapSize, Cloneable { return blocksize; } + public long getFileCreateTime() { + return fileCreateTime; + } + public DataBlockEncoding getDataBlockEncoding() { return encoding; } @@ -189,7 +200,8 @@ public class HFileContext implements HeapSize, Cloneable { 4 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT + // usesHBaseChecksum, includesMvcc, includesTags and compressTags - 4 * Bytes.SIZEOF_BOOLEAN); + 4 * Bytes.SIZEOF_BOOLEAN + + Bytes.SIZEOF_LONG); return size; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java index 5c5d75fe6ce..0d1e6ef775d 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java @@ -52,6 +52,7 @@ public class HFileContextBuilder { private DataBlockEncoding encoding = DataBlockEncoding.NONE; /** Crypto context */ private Encryption.Context cryptoContext = Encryption.Context.NONE; + private long fileCreateTime = 0; public HFileContextBuilder withHBaseCheckSum(boolean useHBaseCheckSum) { this.usesHBaseChecksum = useHBaseCheckSum; @@ -103,8 +104,14 @@ public class HFileContextBuilder { return this; } + public HFileContextBuilder withCreateTime(long fileCreateTime) { + this.fileCreateTime = fileCreateTime; + return this; + } + public HFileContext build() { return new HFileContext(usesHBaseChecksum, includesMvcc, includesTags, compression, - compressTags, checksumType, bytesPerChecksum, blocksize, encoding, cryptoContext); + compressTags, checksumType, bytesPerChecksum, blocksize, encoding, cryptoContext, + fileCreateTime); } } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java index 5bc44ff2dfe..6dc48fa12db 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java @@ -2171,6 +2171,16 @@ public final class ClusterStatusProtos { * */ float getDataLocality(); + + // optional uint64 
last_major_compaction_ts = 17 [default = 0]; + /** + * optional uint64 last_major_compaction_ts = 17 [default = 0]; + */ + boolean hasLastMajorCompactionTs(); + /** + * optional uint64 last_major_compaction_ts = 17 [default = 0]; + */ + long getLastMajorCompactionTs(); } /** * Protobuf type {@code RegionLoad} @@ -2311,6 +2321,11 @@ public final class ClusterStatusProtos { dataLocality_ = input.readFloat(); break; } + case 136: { + bitField0_ |= 0x00010000; + lastMajorCompactionTs_ = input.readUInt64(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -2753,6 +2768,22 @@ public final class ClusterStatusProtos { return dataLocality_; } + // optional uint64 last_major_compaction_ts = 17 [default = 0]; + public static final int LAST_MAJOR_COMPACTION_TS_FIELD_NUMBER = 17; + private long lastMajorCompactionTs_; + /** + * optional uint64 last_major_compaction_ts = 17 [default = 0]; + */ + public boolean hasLastMajorCompactionTs() { + return ((bitField0_ & 0x00010000) == 0x00010000); + } + /** + * optional uint64 last_major_compaction_ts = 17 [default = 0]; + */ + public long getLastMajorCompactionTs() { + return lastMajorCompactionTs_; + } + private void initFields() { regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); stores_ = 0; @@ -2770,6 +2801,7 @@ public final class ClusterStatusProtos { totalStaticBloomSizeKB_ = 0; completeSequenceId_ = 0L; dataLocality_ = 0F; + lastMajorCompactionTs_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -2839,6 +2871,9 @@ public final class ClusterStatusProtos { if (((bitField0_ & 0x00008000) == 0x00008000)) { output.writeFloat(16, dataLocality_); } + if (((bitField0_ & 0x00010000) == 0x00010000)) { + output.writeUInt64(17, lastMajorCompactionTs_); + } getUnknownFields().writeTo(output); } @@ -2912,6 +2947,10 @@ public final class ClusterStatusProtos { size += com.google.protobuf.CodedOutputStream .computeFloatSize(16, dataLocality_); } + if (((bitField0_ & 0x00010000) == 0x00010000)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(17, lastMajorCompactionTs_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -3014,6 +3053,11 @@ public final class ClusterStatusProtos { if (hasDataLocality()) { result = result && (Float.floatToIntBits(getDataLocality()) == Float.floatToIntBits(other.getDataLocality())); } + result = result && (hasLastMajorCompactionTs() == other.hasLastMajorCompactionTs()); + if (hasLastMajorCompactionTs()) { + result = result && (getLastMajorCompactionTs() + == other.getLastMajorCompactionTs()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -3092,6 +3136,10 @@ public final class ClusterStatusProtos { hash = (53 * hash) + Float.floatToIntBits( getDataLocality()); } + if (hasLastMajorCompactionTs()) { + hash = (37 * hash) + LAST_MAJOR_COMPACTION_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getLastMajorCompactionTs()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -3238,6 +3286,8 @@ public final class ClusterStatusProtos { bitField0_ = (bitField0_ & ~0x00004000); dataLocality_ = 0F; bitField0_ = (bitField0_ & ~0x00008000); + lastMajorCompactionTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00010000); return this; } @@ -3334,6 +3384,10 @@ public final class ClusterStatusProtos { to_bitField0_ |= 0x00008000; } result.dataLocality_ = 
dataLocality_; + if (((from_bitField0_ & 0x00010000) == 0x00010000)) { + to_bitField0_ |= 0x00010000; + } + result.lastMajorCompactionTs_ = lastMajorCompactionTs_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -3398,6 +3452,9 @@ public final class ClusterStatusProtos { if (other.hasDataLocality()) { setDataLocality(other.getDataLocality()); } + if (other.hasLastMajorCompactionTs()) { + setLastMajorCompactionTs(other.getLastMajorCompactionTs()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -4337,6 +4394,39 @@ public final class ClusterStatusProtos { return this; } + // optional uint64 last_major_compaction_ts = 17 [default = 0]; + private long lastMajorCompactionTs_ ; + /** + * optional uint64 last_major_compaction_ts = 17 [default = 0]; + */ + public boolean hasLastMajorCompactionTs() { + return ((bitField0_ & 0x00010000) == 0x00010000); + } + /** + * optional uint64 last_major_compaction_ts = 17 [default = 0]; + */ + public long getLastMajorCompactionTs() { + return lastMajorCompactionTs_; + } + /** + * optional uint64 last_major_compaction_ts = 17 [default = 0]; + */ + public Builder setLastMajorCompactionTs(long value) { + bitField0_ |= 0x00010000; + lastMajorCompactionTs_ = value; + onChanged(); + return this; + } + /** + * optional uint64 last_major_compaction_ts = 17 [default = 0]; + */ + public Builder clearLastMajorCompactionTs() { + bitField0_ = (bitField0_ & ~0x00010000); + lastMajorCompactionTs_ = 0L; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:RegionLoad) } @@ -10472,7 +10562,7 @@ public final class ClusterStatusProtos { "PLITTING_NEW\020\r\022\017\n\013MERGING_NEW\020\016\"X\n\022Regio", "nInTransition\022\036\n\004spec\030\001 \002(\0132\020.RegionSpec" + "ifier\022\"\n\014region_state\030\002 \002(\0132\014.RegionStat" + - "e\"\347\003\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" + + "e\"\214\004\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" + "(\0132\020.RegionSpecifier\022\016\n\006stores\030\002 \001(\r\022\022\n\n" + "storefiles\030\003 \001(\r\022\"\n\032store_uncompressed_s" + "ize_MB\030\004 \001(\r\022\031\n\021storefile_size_MB\030\005 \001(\r\022" + @@ -10484,26 +10574,27 @@ public final class ClusterStatusProtos { "\030\014 \001(\r\022\"\n\032total_static_index_size_KB\030\r \001" + "(\r\022\"\n\032total_static_bloom_size_KB\030\016 \001(\r\022\034" + "\n\024complete_sequence_id\030\017 \001(\004\022\025\n\rdata_loc" + - "ality\030\020 \001(\002\"\212\002\n\nServerLoad\022\032\n\022number_of_" + - "requests\030\001 \001(\r\022 \n\030total_number_of_reques" + - "ts\030\002 \001(\r\022\024\n\014used_heap_MB\030\003 \001(\r\022\023\n\013max_he" + - "ap_MB\030\004 \001(\r\022!\n\014region_loads\030\005 \003(\0132\013.Regi" + - "onLoad\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocess" + - "or\022\031\n\021report_start_time\030\007 \001(\004\022\027\n\017report_", - "end_time\030\010 \001(\004\022\030\n\020info_server_port\030\t \001(\r" + - "\"O\n\016LiveServerInfo\022\033\n\006server\030\001 \002(\0132\013.Ser" + - "verName\022 \n\013server_load\030\002 \002(\0132\013.ServerLoa" + - "d\"\340\002\n\rClusterStatus\022/\n\rhbase_version\030\001 \001" + - "(\0132\030.HBaseVersionFileContent\022%\n\014live_ser" + - "vers\030\002 \003(\0132\017.LiveServerInfo\022!\n\014dead_serv" + - "ers\030\003 \003(\0132\013.ServerName\0222\n\025regions_in_tra" + - "nsition\030\004 \003(\0132\023.RegionInTransition\022\036\n\ncl" + - "uster_id\030\005 
\001(\0132\n.ClusterId\022)\n\023master_cop" + - "rocessors\030\006 \003(\0132\014.Coprocessor\022\033\n\006master\030", - "\007 \001(\0132\013.ServerName\022#\n\016backup_masters\030\010 \003" + - "(\0132\013.ServerName\022\023\n\013balancer_on\030\t \001(\010BF\n*" + - "org.apache.hadoop.hbase.protobuf.generat" + - "edB\023ClusterStatusProtosH\001\240\001\001" + "ality\030\020 \001(\002\022#\n\030last_major_compaction_ts\030" + + "\021 \001(\004:\0010\"\212\002\n\nServerLoad\022\032\n\022number_of_req" + + "uests\030\001 \001(\r\022 \n\030total_number_of_requests\030" + + "\002 \001(\r\022\024\n\014used_heap_MB\030\003 \001(\r\022\023\n\013max_heap_" + + "MB\030\004 \001(\r\022!\n\014region_loads\030\005 \003(\0132\013.RegionL" + + "oad\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocessor\022", + "\031\n\021report_start_time\030\007 \001(\004\022\027\n\017report_end" + + "_time\030\010 \001(\004\022\030\n\020info_server_port\030\t \001(\r\"O\n" + + "\016LiveServerInfo\022\033\n\006server\030\001 \002(\0132\013.Server" + + "Name\022 \n\013server_load\030\002 \002(\0132\013.ServerLoad\"\340" + + "\002\n\rClusterStatus\022/\n\rhbase_version\030\001 \001(\0132" + + "\030.HBaseVersionFileContent\022%\n\014live_server" + + "s\030\002 \003(\0132\017.LiveServerInfo\022!\n\014dead_servers" + + "\030\003 \003(\0132\013.ServerName\0222\n\025regions_in_transi" + + "tion\030\004 \003(\0132\023.RegionInTransition\022\036\n\nclust" + + "er_id\030\005 \001(\0132\n.ClusterId\022)\n\023master_coproc", + "essors\030\006 \003(\0132\014.Coprocessor\022\033\n\006master\030\007 \001" + + "(\0132\013.ServerName\022#\n\016backup_masters\030\010 \003(\0132" + + "\013.ServerName\022\023\n\013balancer_on\030\t \001(\010BF\n*org" + + ".apache.hadoop.hbase.protobuf.generatedB" + + "\023ClusterStatusProtosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -10527,7 +10618,7 @@ public final class ClusterStatusProtos { internal_static_RegionLoad_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionLoad_descriptor, - new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", }); + new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", "LastMajorCompactionTs", }); internal_static_ServerLoad_descriptor = getDescriptor().getMessageTypes().get(3); internal_static_ServerLoad_fieldAccessorTable = new diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index 4f7f9545ecd..a0213f2e94e 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -44380,6 +44380,1570 @@ public 
final class MasterProtos { // @@protoc_insertion_point(class_scope:SetQuotaResponse) } + public interface MajorCompactionTimestampRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .TableName table_name = 1; + /** + * required .TableName table_name = 1; + */ + boolean hasTableName(); + /** + * required .TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * required .TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + } + /** + * Protobuf type {@code MajorCompactionTimestampRequest} + */ + public static final class MajorCompactionTimestampRequest extends + com.google.protobuf.GeneratedMessage + implements MajorCompactionTimestampRequestOrBuilder { + // Use MajorCompactionTimestampRequest.newBuilder() to construct. + private MajorCompactionTimestampRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MajorCompactionTimestampRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MajorCompactionTimestampRequest defaultInstance; + public static MajorCompactionTimestampRequest getDefaultInstance() { + return defaultInstance; + } + + public MajorCompactionTimestampRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MajorCompactionTimestampRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MajorCompactionTimestampRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MajorCompactionTimestampRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .TableName table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * required .TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + private void initFields() { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, tableName_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableName_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest) obj; + + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + 
result = result && getTableName() + .equals(other.getTableName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder 
newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code MajorCompactionTimestampRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if 
(tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance()) return this; + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTableName()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * required .TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * required .TableName table_name = 1; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableName table_name = 1; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableName table_name 
= 1; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableName table_name = 1; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * required .TableName table_name = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // @@protoc_insertion_point(builder_scope:MajorCompactionTimestampRequest) + } + + static { + defaultInstance = new MajorCompactionTimestampRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:MajorCompactionTimestampRequest) + } + + public interface MajorCompactionTimestampForRegionRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .RegionSpecifier region = 1; + /** + * required .RegionSpecifier region = 1; + */ + boolean hasRegion(); + /** + * required .RegionSpecifier region = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(); + /** + * required .RegionSpecifier region = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder(); + } + /** + * Protobuf type {@code MajorCompactionTimestampForRegionRequest} + */ + public static final class MajorCompactionTimestampForRegionRequest extends + com.google.protobuf.GeneratedMessage + implements MajorCompactionTimestampForRegionRequestOrBuilder { + // Use MajorCompactionTimestampForRegionRequest.newBuilder() to construct. 
+ private MajorCompactionTimestampForRegionRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MajorCompactionTimestampForRegionRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MajorCompactionTimestampForRegionRequest defaultInstance; + public static MajorCompactionTimestampForRegionRequest getDefaultInstance() { + return defaultInstance; + } + + public MajorCompactionTimestampForRegionRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MajorCompactionTimestampForRegionRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = region_.toBuilder(); + } + region_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(region_); + region_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampForRegionRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampForRegionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MajorCompactionTimestampForRegionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MajorCompactionTimestampForRegionRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return 
PARSER; + } + + private int bitField0_; + // required .RegionSpecifier region = 1; + public static final int REGION_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_; + /** + * required .RegionSpecifier region = 1; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { + return region_; + } + /** + * required .RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { + return region_; + } + + private void initFields() { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRegion()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegion().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, region_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, region_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest) obj; + + boolean result = true; + result = result && (hasRegion() == other.hasRegion()); + if (hasRegion()) { + result = result && getRegion() + .equals(other.getRegion()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegion()) { + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code MajorCompactionTimestampForRegionRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampForRegionRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampForRegionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegionFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (regionBuilder_ == null) { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampForRegionRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (regionBuilder_ == null) { + result.region_ = region_; + } else { + result.region_ = regionBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance()) return this; + if (other.hasRegion()) { + mergeRegion(other.getRegion()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRegion()) { + + return false; + } + if (!getRegion().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .RegionSpecifier region = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_; + /** + * required .RegionSpecifier region = 1; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { + if (regionBuilder_ == null) { + return region_; + } else { + return regionBuilder_.getMessage(); + } + } + /** + * required .RegionSpecifier region = 1; + */ + public Builder setRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + region_ = value; + onChanged(); + } else { + regionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .RegionSpecifier region = 1; + */ + public Builder setRegion( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + if (regionBuilder_ == null) { + region_ = builderForValue.build(); + onChanged(); + } else { + regionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .RegionSpecifier region = 1; + */ + public Builder mergeRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + region_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) { + region_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(region_).mergeFrom(value).buildPartial(); + } else { + region_ = value; + } + onChanged(); + } else { + 
regionBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .RegionSpecifier region = 1; + */ + public Builder clearRegion() { + if (regionBuilder_ == null) { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + onChanged(); + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegionFieldBuilder().getBuilder(); + } + /** + * required .RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { + if (regionBuilder_ != null) { + return regionBuilder_.getMessageOrBuilder(); + } else { + return region_; + } + } + /** + * required .RegionSpecifier region = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> + getRegionFieldBuilder() { + if (regionBuilder_ == null) { + regionBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( + region_, + getParentForChildren(), + isClean()); + region_ = null; + } + return regionBuilder_; + } + + // @@protoc_insertion_point(builder_scope:MajorCompactionTimestampForRegionRequest) + } + + static { + defaultInstance = new MajorCompactionTimestampForRegionRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:MajorCompactionTimestampForRegionRequest) + } + + public interface MajorCompactionTimestampResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required int64 compaction_timestamp = 1; + /** + * required int64 compaction_timestamp = 1; + */ + boolean hasCompactionTimestamp(); + /** + * required int64 compaction_timestamp = 1; + */ + long getCompactionTimestamp(); + } + /** + * Protobuf type {@code MajorCompactionTimestampResponse} + */ + public static final class MajorCompactionTimestampResponse extends + com.google.protobuf.GeneratedMessage + implements MajorCompactionTimestampResponseOrBuilder { + // Use MajorCompactionTimestampResponse.newBuilder() to construct. 
+ private MajorCompactionTimestampResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MajorCompactionTimestampResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MajorCompactionTimestampResponse defaultInstance; + public static MajorCompactionTimestampResponse getDefaultInstance() { + return defaultInstance; + } + + public MajorCompactionTimestampResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MajorCompactionTimestampResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + compactionTimestamp_ = input.readInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MajorCompactionTimestampResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MajorCompactionTimestampResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required int64 compaction_timestamp = 1; + public static final int COMPACTION_TIMESTAMP_FIELD_NUMBER = 1; + private long compactionTimestamp_; + /** + * required int64 compaction_timestamp = 1; + */ + public boolean hasCompactionTimestamp() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int64 compaction_timestamp = 1; + */ + public long getCompactionTimestamp() { + return compactionTimestamp_; + } + + 
private void initFields() { + compactionTimestamp_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasCompactionTimestamp()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(1, compactionTimestamp_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, compactionTimestamp_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) obj; + + boolean result = true; + result = result && (hasCompactionTimestamp() == other.hasCompactionTimestamp()); + if (hasCompactionTimestamp()) { + result = result && (getCompactionTimestamp() + == other.getCompactionTimestamp()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasCompactionTimestamp()) { + hash = (37 * hash) + COMPACTION_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getCompactionTimestamp()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code MajorCompactionTimestampResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + 
maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + compactionTimestamp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.compactionTimestamp_ = compactionTimestamp_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()) return this; + if (other.hasCompactionTimestamp()) { + setCompactionTimestamp(other.getCompactionTimestamp()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasCompactionTimestamp()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required int64 compaction_timestamp = 1; + private long compactionTimestamp_ ; + /** + * 
required int64 compaction_timestamp = 1; + */ + public boolean hasCompactionTimestamp() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int64 compaction_timestamp = 1; + */ + public long getCompactionTimestamp() { + return compactionTimestamp_; + } + /** + * required int64 compaction_timestamp = 1; + */ + public Builder setCompactionTimestamp(long value) { + bitField0_ |= 0x00000001; + compactionTimestamp_ = value; + onChanged(); + return this; + } + /** + * required int64 compaction_timestamp = 1; + */ + public Builder clearCompactionTimestamp() { + bitField0_ = (bitField0_ & ~0x00000001); + compactionTimestamp_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:MajorCompactionTimestampResponse) + } + + static { + defaultInstance = new MajorCompactionTimestampResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:MajorCompactionTimestampResponse) + } + /** * Protobuf service {@code MasterService} */ @@ -44954,6 +46518,30 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc getLastMajorCompactionTimestamp(.MajorCompactionTimestampRequest) returns (.MajorCompactionTimestampResponse); + * + *
<pre>
+       ** Returns the timestamp of the last major compaction
+       * </pre>
+ */ + public abstract void getLastMajorCompactionTimestamp( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc getLastMajorCompactionTimestampForRegion(.MajorCompactionTimestampForRegionRequest) returns (.MajorCompactionTimestampResponse); + * + *
<pre>
+       ** Returns the timestamp of the last major compaction
+       * </pre>
+ */ + public abstract void getLastMajorCompactionTimestampForRegion( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -45319,6 +46907,22 @@ public final class MasterProtos { impl.setQuota(controller, request, done); } + @java.lang.Override + public void getLastMajorCompactionTimestamp( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request, + com.google.protobuf.RpcCallback done) { + impl.getLastMajorCompactionTimestamp(controller, request, done); + } + + @java.lang.Override + public void getLastMajorCompactionTimestampForRegion( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request, + com.google.protobuf.RpcCallback done) { + impl.getLastMajorCompactionTimestampForRegion(controller, request, done); + } + }; } @@ -45431,6 +47035,10 @@ public final class MasterProtos { return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request); case 44: return impl.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request); + case 45: + return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request); + case 46: + return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -45535,6 +47143,10 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); case 44: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); + case 45: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); + case 46: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -45639,6 +47251,10 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); case 44: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); + case 45: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); + case 46: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -46213,6 +47829,30 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc getLastMajorCompactionTimestamp(.MajorCompactionTimestampRequest) returns (.MajorCompactionTimestampResponse); + * + *
<pre>
+     ** Returns the timestamp of the last major compaction
+     * </pre>
+ */ + public abstract void getLastMajorCompactionTimestamp( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc getLastMajorCompactionTimestampForRegion(.MajorCompactionTimestampForRegionRequest) returns (.MajorCompactionTimestampResponse); + * + *
<pre>
+     ** Returns the timestamp of the last major compaction
+     * </pre>
+ */ + public abstract void getLastMajorCompactionTimestampForRegion( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -46460,6 +48100,16 @@ public final class MasterProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 45: + this.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 46: + this.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -46564,6 +48214,10 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); case 44: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); + case 45: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); + case 46: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -46668,6 +48322,10 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); case 44: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); + case 45: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); + case 46: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -47363,6 +49021,36 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance())); } + + public void getLastMajorCompactionTimestamp( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(45), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance())); + } + + public void getLastMajorCompactionTimestampForRegion( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(46), + controller, + request, + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -47595,6 +49283,16 @@ public final class MasterProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestamp( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -48143,6 +49841,30 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestamp( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(45), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(46), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:MasterService) @@ -48578,6 +50300,21 @@ public final class MasterProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_SetQuotaResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_MajorCompactionTimestampRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_MajorCompactionTimestampRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_MajorCompactionTimestampForRegionRequest_descriptor; + private static + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_MajorCompactionTimestampForRegionRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_MajorCompactionTimestampResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_MajorCompactionTimestampResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -48707,88 +50444,99 @@ public final class MasterProtos { "\021\n\tnamespace\030\003 \001(\t\022\036\n\ntable_name\030\004 \001(\0132\n" + ".TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypass" + "_globals\030\006 \001(\010\022\"\n\010throttle\030\007 \001(\0132\020.Throt", - "tleRequest\"\022\n\020SetQuotaResponse2\346\030\n\rMaste" + - "rService\022S\n\024GetSchemaAlterStatus\022\034.GetSc" + - "hemaAlterStatusRequest\032\035.GetSchemaAlterS" + - "tatusResponse\022P\n\023GetTableDescriptors\022\033.G" + - "etTableDescriptorsRequest\032\034.GetTableDesc" + - "riptorsResponse\022>\n\rGetTableNames\022\025.GetTa" + - "bleNamesRequest\032\026.GetTableNamesResponse\022" + - "G\n\020GetClusterStatus\022\030.GetClusterStatusRe" + - "quest\032\031.GetClusterStatusResponse\022D\n\017IsMa" + - "sterRunning\022\027.IsMasterRunningRequest\032\030.I", - "sMasterRunningResponse\0222\n\tAddColumn\022\021.Ad" + - "dColumnRequest\032\022.AddColumnResponse\022;\n\014De" + - "leteColumn\022\024.DeleteColumnRequest\032\025.Delet" + - "eColumnResponse\022;\n\014ModifyColumn\022\024.Modify" + - "ColumnRequest\032\025.ModifyColumnResponse\0225\n\n" + - "MoveRegion\022\022.MoveRegionRequest\032\023.MoveReg" + - "ionResponse\022Y\n\026DispatchMergingRegions\022\036." + - "DispatchMergingRegionsRequest\032\037.Dispatch" + - "MergingRegionsResponse\022;\n\014AssignRegion\022\024" + - ".AssignRegionRequest\032\025.AssignRegionRespo", - "nse\022A\n\016UnassignRegion\022\026.UnassignRegionRe" + - "quest\032\027.UnassignRegionResponse\022>\n\rOfflin" + - "eRegion\022\025.OfflineRegionRequest\032\026.Offline" + - "RegionResponse\0228\n\013DeleteTable\022\023.DeleteTa" + - "bleRequest\032\024.DeleteTableResponse\022>\n\rtrun" + - "cateTable\022\025.TruncateTableRequest\032\026.Trunc" + - "ateTableResponse\0228\n\013EnableTable\022\023.Enable" + - "TableRequest\032\024.EnableTableResponse\022;\n\014Di" + - "sableTable\022\024.DisableTableRequest\032\025.Disab" + - "leTableResponse\0228\n\013ModifyTable\022\023.ModifyT", - "ableRequest\032\024.ModifyTableResponse\0228\n\013Cre" + - "ateTable\022\023.CreateTableRequest\032\024.CreateTa" + - "bleResponse\022/\n\010Shutdown\022\020.ShutdownReques" + - "t\032\021.ShutdownResponse\0225\n\nStopMaster\022\022.Sto" + - "pMasterRequest\032\023.StopMasterResponse\022,\n\007B" + - "alance\022\017.BalanceRequest\032\020.BalanceRespons" + - "e\022M\n\022SetBalancerRunning\022\032.SetBalancerRun" + - "ningRequest\032\033.SetBalancerRunningResponse" + - "\022A\n\016RunCatalogScan\022\026.RunCatalogScanReque" + - "st\032\027.RunCatalogScanResponse\022S\n\024EnableCat", - "alogJanitor\022\034.EnableCatalogJanitorReques" + - "t\032\035.EnableCatalogJanitorResponse\022\\\n\027IsCa" + - "talogJanitorEnabled\022\037.IsCatalogJanitorEn" + - "abledRequest\032 .IsCatalogJanitorEnabledRe" + - "sponse\022L\n\021ExecMasterService\022\032.Coprocesso" + - "rServiceRequest\032\033.CoprocessorServiceResp" + - "onse\022/\n\010Snapshot\022\020.SnapshotRequest\032\021.Sna" + - 
"pshotResponse\022V\n\025GetCompletedSnapshots\022\035" + - ".GetCompletedSnapshotsRequest\032\036.GetCompl" + - "etedSnapshotsResponse\022A\n\016DeleteSnapshot\022", - "\026.DeleteSnapshotRequest\032\027.DeleteSnapshot" + - "Response\022A\n\016IsSnapshotDone\022\026.IsSnapshotD" + - "oneRequest\032\027.IsSnapshotDoneResponse\022D\n\017R" + - "estoreSnapshot\022\027.RestoreSnapshotRequest\032" + - "\030.RestoreSnapshotResponse\022V\n\025IsRestoreSn" + - "apshotDone\022\035.IsRestoreSnapshotDoneReques" + - "t\032\036.IsRestoreSnapshotDoneResponse\022>\n\rExe" + - "cProcedure\022\025.ExecProcedureRequest\032\026.Exec" + - "ProcedureResponse\022E\n\024ExecProcedureWithRe" + - "t\022\025.ExecProcedureRequest\032\026.ExecProcedure", - "Response\022D\n\017IsProcedureDone\022\027.IsProcedur" + - "eDoneRequest\032\030.IsProcedureDoneResponse\022D" + - "\n\017ModifyNamespace\022\027.ModifyNamespaceReque" + - "st\032\030.ModifyNamespaceResponse\022D\n\017CreateNa" + - "mespace\022\027.CreateNamespaceRequest\032\030.Creat" + - "eNamespaceResponse\022D\n\017DeleteNamespace\022\027." + - "DeleteNamespaceRequest\032\030.DeleteNamespace" + - "Response\022Y\n\026GetNamespaceDescriptor\022\036.Get" + - "NamespaceDescriptorRequest\032\037.GetNamespac" + - "eDescriptorResponse\022_\n\030ListNamespaceDesc", - "riptors\022 .ListNamespaceDescriptorsReques" + - "t\032!.ListNamespaceDescriptorsResponse\022t\n\037" + - "ListTableDescriptorsByNamespace\022\'.ListTa" + - "bleDescriptorsByNamespaceRequest\032(.ListT" + - "ableDescriptorsByNamespaceResponse\022b\n\031Li" + - "stTableNamesByNamespace\022!.ListTableNames" + - "ByNamespaceRequest\032\".ListTableNamesByNam" + - "espaceResponse\022>\n\rGetTableState\022\025.GetTab" + - "leStateRequest\032\026.GetTableStateResponse\022/" + - "\n\010SetQuota\022\020.SetQuotaRequest\032\021.SetQuotaR", - "esponseBB\n*org.apache.hadoop.hbase.proto" + - "buf.generatedB\014MasterProtosH\001\210\001\001\240\001\001" + "tleRequest\"\022\n\020SetQuotaResponse\"A\n\037MajorC" + + "ompactionTimestampRequest\022\036\n\ntable_name\030" + + "\001 \002(\0132\n.TableName\"L\n(MajorCompactionTime" + + "stampForRegionRequest\022 \n\006region\030\001 \002(\0132\020." 
+ + "RegionSpecifier\"@\n MajorCompactionTimest" + + "ampResponse\022\034\n\024compaction_timestamp\030\001 \002(" + + "\0032\310\032\n\rMasterService\022S\n\024GetSchemaAlterSta" + + "tus\022\034.GetSchemaAlterStatusRequest\032\035.GetS" + + "chemaAlterStatusResponse\022P\n\023GetTableDesc" + + "riptors\022\033.GetTableDescriptorsRequest\032\034.G", + "etTableDescriptorsResponse\022>\n\rGetTableNa" + + "mes\022\025.GetTableNamesRequest\032\026.GetTableNam" + + "esResponse\022G\n\020GetClusterStatus\022\030.GetClus" + + "terStatusRequest\032\031.GetClusterStatusRespo" + + "nse\022D\n\017IsMasterRunning\022\027.IsMasterRunning" + + "Request\032\030.IsMasterRunningResponse\0222\n\tAdd" + + "Column\022\021.AddColumnRequest\032\022.AddColumnRes" + + "ponse\022;\n\014DeleteColumn\022\024.DeleteColumnRequ" + + "est\032\025.DeleteColumnResponse\022;\n\014ModifyColu" + + "mn\022\024.ModifyColumnRequest\032\025.ModifyColumnR", + "esponse\0225\n\nMoveRegion\022\022.MoveRegionReques" + + "t\032\023.MoveRegionResponse\022Y\n\026DispatchMergin" + + "gRegions\022\036.DispatchMergingRegionsRequest" + + "\032\037.DispatchMergingRegionsResponse\022;\n\014Ass" + + "ignRegion\022\024.AssignRegionRequest\032\025.Assign" + + "RegionResponse\022A\n\016UnassignRegion\022\026.Unass" + + "ignRegionRequest\032\027.UnassignRegionRespons" + + "e\022>\n\rOfflineRegion\022\025.OfflineRegionReques" + + "t\032\026.OfflineRegionResponse\0228\n\013DeleteTable" + + "\022\023.DeleteTableRequest\032\024.DeleteTableRespo", + "nse\022>\n\rtruncateTable\022\025.TruncateTableRequ" + + "est\032\026.TruncateTableResponse\0228\n\013EnableTab" + + "le\022\023.EnableTableRequest\032\024.EnableTableRes" + + "ponse\022;\n\014DisableTable\022\024.DisableTableRequ" + + "est\032\025.DisableTableResponse\0228\n\013ModifyTabl" + + "e\022\023.ModifyTableRequest\032\024.ModifyTableResp" + + "onse\0228\n\013CreateTable\022\023.CreateTableRequest" + + "\032\024.CreateTableResponse\022/\n\010Shutdown\022\020.Shu" + + "tdownRequest\032\021.ShutdownResponse\0225\n\nStopM" + + "aster\022\022.StopMasterRequest\032\023.StopMasterRe", + "sponse\022,\n\007Balance\022\017.BalanceRequest\032\020.Bal" + + "anceResponse\022M\n\022SetBalancerRunning\022\032.Set" + + "BalancerRunningRequest\032\033.SetBalancerRunn" + + "ingResponse\022A\n\016RunCatalogScan\022\026.RunCatal" + + "ogScanRequest\032\027.RunCatalogScanResponse\022S" + + "\n\024EnableCatalogJanitor\022\034.EnableCatalogJa" + + "nitorRequest\032\035.EnableCatalogJanitorRespo" + + "nse\022\\\n\027IsCatalogJanitorEnabled\022\037.IsCatal" + + "ogJanitorEnabledRequest\032 .IsCatalogJanit" + + "orEnabledResponse\022L\n\021ExecMasterService\022\032", + ".CoprocessorServiceRequest\032\033.Coprocessor" + + "ServiceResponse\022/\n\010Snapshot\022\020.SnapshotRe" + + "quest\032\021.SnapshotResponse\022V\n\025GetCompleted" + + "Snapshots\022\035.GetCompletedSnapshotsRequest" + + "\032\036.GetCompletedSnapshotsResponse\022A\n\016Dele" + + "teSnapshot\022\026.DeleteSnapshotRequest\032\027.Del" + + "eteSnapshotResponse\022A\n\016IsSnapshotDone\022\026." 
+ + "IsSnapshotDoneRequest\032\027.IsSnapshotDoneRe" + + "sponse\022D\n\017RestoreSnapshot\022\027.RestoreSnaps" + + "hotRequest\032\030.RestoreSnapshotResponse\022V\n\025", + "IsRestoreSnapshotDone\022\035.IsRestoreSnapsho" + + "tDoneRequest\032\036.IsRestoreSnapshotDoneResp" + + "onse\022>\n\rExecProcedure\022\025.ExecProcedureReq" + + "uest\032\026.ExecProcedureResponse\022E\n\024ExecProc" + + "edureWithRet\022\025.ExecProcedureRequest\032\026.Ex" + + "ecProcedureResponse\022D\n\017IsProcedureDone\022\027" + + ".IsProcedureDoneRequest\032\030.IsProcedureDon" + + "eResponse\022D\n\017ModifyNamespace\022\027.ModifyNam" + + "espaceRequest\032\030.ModifyNamespaceResponse\022" + + "D\n\017CreateNamespace\022\027.CreateNamespaceRequ", + "est\032\030.CreateNamespaceResponse\022D\n\017DeleteN" + + "amespace\022\027.DeleteNamespaceRequest\032\030.Dele" + + "teNamespaceResponse\022Y\n\026GetNamespaceDescr" + + "iptor\022\036.GetNamespaceDescriptorRequest\032\037." + + "GetNamespaceDescriptorResponse\022_\n\030ListNa" + + "mespaceDescriptors\022 .ListNamespaceDescri" + + "ptorsRequest\032!.ListNamespaceDescriptorsR" + + "esponse\022t\n\037ListTableDescriptorsByNamespa" + + "ce\022\'.ListTableDescriptorsByNamespaceRequ" + + "est\032(.ListTableDescriptorsByNamespaceRes", + "ponse\022b\n\031ListTableNamesByNamespace\022!.Lis" + + "tTableNamesByNamespaceRequest\032\".ListTabl" + + "eNamesByNamespaceResponse\022>\n\rGetTableSta" + + "te\022\025.GetTableStateRequest\032\026.GetTableStat" + + "eResponse\022/\n\010SetQuota\022\020.SetQuotaRequest\032" + + "\021.SetQuotaResponse\022f\n\037getLastMajorCompac" + + "tionTimestamp\022 .MajorCompactionTimestamp" + + "Request\032!.MajorCompactionTimestampRespon" + + "se\022x\n(getLastMajorCompactionTimestampFor" + + "Region\022).MajorCompactionTimestampForRegi", + "onRequest\032!.MajorCompactionTimestampResp" + + "onseBB\n*org.apache.hadoop.hbase.protobuf" + + ".generatedB\014MasterProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -49311,6 +51059,24 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SetQuotaResponse_descriptor, new java.lang.String[] { }); + internal_static_MajorCompactionTimestampRequest_descriptor = + getDescriptor().getMessageTypes().get(86); + internal_static_MajorCompactionTimestampRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MajorCompactionTimestampRequest_descriptor, + new java.lang.String[] { "TableName", }); + internal_static_MajorCompactionTimestampForRegionRequest_descriptor = + getDescriptor().getMessageTypes().get(87); + internal_static_MajorCompactionTimestampForRegionRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MajorCompactionTimestampForRegionRequest_descriptor, + new java.lang.String[] { "Region", }); + internal_static_MajorCompactionTimestampResponse_descriptor = + getDescriptor().getMessageTypes().get(88); + internal_static_MajorCompactionTimestampResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MajorCompactionTimestampResponse_descriptor, + new java.lang.String[] { "CompactionTimestamp", }); return null; } }; diff --git a/hbase-protocol/src/main/protobuf/ClusterStatus.proto 
b/hbase-protocol/src/main/protobuf/ClusterStatus.proto index 7e7839564df..2b2d9eb8ac0 100644 --- a/hbase-protocol/src/main/protobuf/ClusterStatus.proto +++ b/hbase-protocol/src/main/protobuf/ClusterStatus.proto @@ -113,6 +113,8 @@ message RegionLoad { /** The current data locality for region in the regionserver */ optional float data_locality = 16; + + optional uint64 last_major_compaction_ts = 17 [default = 0]; } /* Server-level protobufs */ diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto index e55dcc0cd42..e7a3a999536 100644 --- a/hbase-protocol/src/main/protobuf/Master.proto +++ b/hbase-protocol/src/main/protobuf/Master.proto @@ -387,6 +387,18 @@ message SetQuotaRequest { message SetQuotaResponse { } +message MajorCompactionTimestampRequest { + required TableName table_name = 1; +} + +message MajorCompactionTimestampForRegionRequest { + required RegionSpecifier region = 1; +} + +message MajorCompactionTimestampResponse { + required int64 compaction_timestamp = 1; +} + service MasterService { /** Used by the client to get the number of regions that have received the updated schema */ rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest) @@ -601,4 +613,12 @@ service MasterService { /** Apply the new quota settings */ rpc SetQuota(SetQuotaRequest) returns(SetQuotaResponse); + + /** Returns the timestamp of the last major compaction */ + rpc getLastMajorCompactionTimestamp(MajorCompactionTimestampRequest) + returns(MajorCompactionTimestampResponse); + + /** Returns the timestamp of the last major compaction */ + rpc getLastMajorCompactionTimestampForRegion(MajorCompactionTimestampForRegionRequest) + returns(MajorCompactionTimestampResponse); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java index 2bef680e13b..52491e6b7cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java @@ -148,6 +148,9 @@ public abstract class AbstractHFileWriter implements HFile.Writer { int avgValueLen = entryCount == 0 ? 
0 : (int) (totalValueLength / entryCount); fileInfo.append(FileInfo.AVG_VALUE_LEN, Bytes.toBytes(avgValueLen), false); + + fileInfo.append(FileInfo.CREATE_TIME_TS, Bytes.toBytes(hFileContext.getFileCreateTime()), + false); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index 1e97f63aa53..ad62d7186f3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -541,6 +541,7 @@ public class HFile { static final byte [] LASTKEY = Bytes.toBytes(RESERVED_PREFIX + "LASTKEY"); static final byte [] AVG_KEY_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_KEY_LEN"); static final byte [] AVG_VALUE_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_VALUE_LEN"); + static final byte [] CREATE_TIME_TS = Bytes.toBytes(RESERVED_PREFIX + "CREATE_TIME_TS"); static final byte [] COMPARATOR = Bytes.toBytes(RESERVED_PREFIX + "COMPARATOR"); static final byte [] TAGS_COMPRESSED = Bytes.toBytes(RESERVED_PREFIX + "TAGS_COMPRESSED"); public static final byte [] MAX_TAGS_LEN = Bytes.toBytes(RESERVED_PREFIX + "MAX_TAGS_LEN"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java index e466041ce55..26cb6c95334 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java @@ -157,6 +157,7 @@ public class HFileReaderV2 extends AbstractHFileReader { // File info fileInfo = new FileInfo(); fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream()); + this.hfileContext.setFileCreateTime(Bytes.toLong(fileInfo.get(FileInfo.CREATE_TIME_TS))); lastKey = fileInfo.get(FileInfo.LASTKEY); avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN)); avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index e4881869568..a5394890da6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -2290,4 +2290,14 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } } } + + @Override + public long getLastMajorCompactionTimestamp(TableName table) throws IOException { + return getClusterStatus().getLastMajorCompactionTsForTable(table); + } + + @Override + public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException { + return getClusterStatus().getLastMajorCompactionTsForRegion(regionName); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 6930bf312a0..0e8146122ed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -47,14 +47,11 @@ import org.apache.hadoop.hbase.procedure.MasterProcedureManager; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.ResponseConverter; -import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.*; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest; @@ -111,6 +108,9 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescript import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse; @@ -1275,4 +1275,35 @@ public class MasterRpcServices extends RSRpcServices throw new ServiceException(e); } } + + @Override + public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(RpcController controller, + MajorCompactionTimestampRequest request) throws ServiceException { + MajorCompactionTimestampResponse.Builder response = + MajorCompactionTimestampResponse.newBuilder(); + try { + master.checkInitialized(); + response.setCompactionTimestamp(master.getLastMajorCompactionTimestamp(ProtobufUtil + .toTableName(request.getTableName()))); + } catch (IOException e) { + throw new ServiceException(e); + } + return response.build(); + } + + @Override + public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion( + RpcController controller, MajorCompactionTimestampForRegionRequest request) + throws ServiceException { + MajorCompactionTimestampResponse.Builder response = + MajorCompactionTimestampResponse.newBuilder(); + try { + master.checkInitialized(); + response.setCompactionTimestamp(master.getLastMajorCompactionTimestampForRegion(request + .getRegion().getValue().toByteArray())); + } catch (IOException e) { + throw new ServiceException(e); + } + return response.build(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 77332569e98..63f311960cd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -261,4 +261,20 @@ public interface MasterServices extends Server { * @throws IOException 
*/ public List listTableNamesByNamespace(String name) throws IOException; + + /** + * @param table + * @return the timestamp of the last successful major compaction for the passed table, + * or 0 if no HFile resulting from a major compaction exists + * @throws IOException + */ + public long getLastMajorCompactionTimestamp(TableName table) throws IOException; + + /** + * @param regionName + * @return the timestamp of the last successful major compaction for the passed region + * or 0 if no HFile resulting from a major compaction exists + * @throws IOException + */ + public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 8e44b395703..26f8943d731 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -117,6 +117,7 @@ import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.ipc.CallerDisconnectedException; import org.apache.hadoop.hbase.ipc.RpcCallContext; import org.apache.hadoop.hbase.ipc.RpcServer; @@ -1494,6 +1495,28 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return Collections.min(lastStoreFlushTimeMap.values()); } + /** + * This can be used to determine the last time all files of this region were major compacted. + * @param majorCompactioOnly Only consider HFile that are the result of major compaction + * @return the timestamp of the oldest HFile for all stores of this region + */ + public long getOldestHfileTs(boolean majorCompactioOnly) throws IOException { + long result = Long.MAX_VALUE; + for (Store store : getStores().values()) { + for (StoreFile file : store.getStorefiles()) { + HFile.Reader reader = file.getReader().getHFileReader(); + if (majorCompactioOnly) { + byte[] val = reader.loadFileInfo().get(StoreFile.MAJOR_COMPACTION_KEY); + if (val == null || !Bytes.toBoolean(val)) { + continue; + } + } + result = Math.min(result, reader.getFileContext().getFileCreateTime()); + } + } + return result == Long.MAX_VALUE ? 0 : result; + } + ////////////////////////////////////////////////////////////////////////////// // HRegion maintenance. 
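As an aside, here is a minimal usage sketch (not part of this patch) of the client-facing API that the MasterServices and HRegion changes above ultimately serve: polling Admin.getLastMajorCompactionTimestamp until every region of a table carries a major-compacted HFile newer than a given point in time. The helper class name and the polling interval are illustrative assumptions; only the Admin call itself comes from this change.

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class MajorCompactionWaiter {
  /**
   * Polls until the oldest major-compacted HFile of the table is newer than afterTs.
   * While no qualifying HFile can be found, the Admin call reports 0, which keeps
   * the loop waiting.
   */
  public static void waitForMajorCompaction(Admin admin, TableName table, long afterTs)
      throws IOException, InterruptedException {
    while (admin.getLastMajorCompactionTimestamp(table) <= afterTs) {
      Thread.sleep(1000);
    }
  }
}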
   //
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 5d6693385ac..bc52eb84d91 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1341,7 +1341,7 @@ public class HRegionServer extends HasThread implements
    * @throws IOException
    */
   private RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr,
-      RegionSpecifier.Builder regionSpecifier) {
+      RegionSpecifier.Builder regionSpecifier) throws IOException {
     byte[] name = r.getRegionName();
     int stores = 0;
     int storefiles = 0;
@@ -1403,8 +1403,8 @@
       .setTotalCompactingKVs(totalCompactingKVs)
       .setCurrentCompactedKVs(currentCompactedKVs)
       .setCompleteSequenceId(r.maxFlushedSeqId)
-      .setDataLocality(dataLocality);
-
+      .setDataLocality(dataLocality)
+      .setLastMajorCompactionTs(r.getOldestHfileTs(true));
     return regionLoadBldr.build();
   }
 
@@ -1412,7 +1412,7 @@
    * @param encodedRegionName
    * @return An instance of RegionLoad.
    */
-  public RegionLoad createRegionLoad(final String encodedRegionName) {
+  public RegionLoad createRegionLoad(final String encodedRegionName) throws IOException {
     HRegion r = null;
     r = this.onlineRegions.get(encodedRegionName);
     return r != null ? createRegionLoad(r, null, null) : null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 047d689f3f1..942b47f7164 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -991,6 +991,7 @@ public class HStore implements Store {
         .withHBaseCheckSum(true)
         .withDataBlockEncoding(family.getDataBlockEncoding())
         .withEncryptionContext(cryptoContext)
+        .withCreateTime(EnvironmentEdgeManager.currentTime())
         .build();
     return hFileContext;
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
index 9b55acd2d8f..85fbbc6ba70 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
@@ -408,6 +408,58 @@ public class TestAdmin1 {
     table.close();
   }
 
+  @Test (timeout=300000)
+  public void testCompactionTimestamps() throws Exception {
+    HColumnDescriptor fam1 = new HColumnDescriptor("fam1");
+    TableName tableName = TableName.valueOf("testCompactionTimestampsTable");
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    htd.addFamily(fam1);
+    this.admin.createTable(htd);
+    HTable table = (HTable)TEST_UTIL.getConnection().getTable(htd.getTableName());
+    long ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+    assertEquals(0, ts);
+    Put p = new Put(Bytes.toBytes("row1"));
+    p.add(Bytes.toBytes("fam1"), Bytes.toBytes("fam1"), Bytes.toBytes("fam1"));
+    table.put(p);
+    ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+    // no files written -> no data
+    assertEquals(0, ts);
+
+    this.admin.flush(tableName);
+    ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+    // still 0, we flushed a file, but no major compaction happened
+    assertEquals(0, ts);
+
+    byte[] regionName =
+        table.getRegionLocator().getAllRegionLocations().get(0).getRegionInfo().getRegionName();
+    long ts1 = this.admin.getLastMajorCompactionTimestampForRegion(regionName);
+    assertEquals(ts, ts1);
+    p = new Put(Bytes.toBytes("row2"));
+    p.add(Bytes.toBytes("fam1"), Bytes.toBytes("fam1"), Bytes.toBytes("fam1"));
+    table.put(p);
+    this.admin.flush(tableName);
+    ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+    // make sure the region API returns the same value, as the old file is still around
+    assertEquals(ts1, ts);
+
+    TEST_UTIL.compact(tableName, true);
+    table.put(p);
+    // forces a wait for the compaction
+    this.admin.flush(tableName);
+    ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+    // after a compaction our earliest timestamp will have progressed forward
+    assertTrue(ts > ts1);
+
+    // region API still the same
+    ts1 = this.admin.getLastMajorCompactionTimestampForRegion(regionName);
+    assertEquals(ts, ts1);
+    table.put(p);
+    this.admin.flush(tableName);
+    ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+    assertEquals(ts, ts1);
+    table.close();
+  }
+
   @Test (timeout=300000)
   public void testHColumnValidName() {
     boolean exceptionThrown;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index cc501ededd3..fb7752ed9e5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -458,6 +458,18 @@ public class TestCatalogJanitor {
       // Auto-generated method stub
       return false;
     }
+
+    @Override
+    public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
+      // Auto-generated method stub
+      return 0;
+    }
+
+    @Override
+    public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
+      // Auto-generated method stub
+      return 0;
+    }
   }
 
   @Test
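
Illustrative usage (not part of the patch): a minimal client-side sketch of how the new Admin calls could be combined with the existing Admin#majorCompact API to trigger a major compaction only when the newest major-compacted data has grown stale. The table name "usertable" and the seven-day threshold below are made-up values for the example, not anything defined by this change.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MajorCompactionAgeCheck {
  // Hypothetical threshold: treat the table as overdue if the oldest HFile
  // produced by a major compaction is more than seven days old.
  private static final long MAX_AGE_MS = 7L * 24 * 60 * 60 * 1000;

  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("usertable"); // illustrative table name
      // Per the new API, 0 means no HFile resulting from a major compaction was found.
      long ts = admin.getLastMajorCompactionTimestamp(table);
      if (ts == 0 || System.currentTimeMillis() - ts > MAX_AGE_MS) {
        // Request an asynchronous major compaction of the whole table.
        admin.majorCompact(table);
      }
    }
  }
}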