diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index a70b639035d..f7531eec052 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -87,6 +87,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest
 import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos;
 import org.apache.hadoop.hbase.protobuf.generated.CellProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
@@ -117,6 +118,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
+import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
+import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.TablePermission;
 import org.apache.hadoop.hbase.security.access.UserPermission;
@@ -2499,6 +2502,29 @@ public final class ProtobufUtil {
     return builder.build();
   }
 
+  public static FlushDescriptor toFlushDescriptor(FlushAction action, HRegionInfo hri,
+      long flushSeqId, Map<byte[], List<Path>> committedFiles) {
+    FlushDescriptor.Builder desc = FlushDescriptor.newBuilder()
+        .setAction(action)
+        .setEncodedRegionName(ByteStringer.wrap(hri.getEncodedNameAsBytes()))
+        .setFlushSequenceNumber(flushSeqId)
+        .setTableName(ByteStringer.wrap(hri.getTable().getName()));
+
+    for (Map.Entry<byte[], List<Path>> entry : committedFiles.entrySet()) {
+      WALProtos.FlushDescriptor.StoreFlushDescriptor.Builder builder =
+          WALProtos.FlushDescriptor.StoreFlushDescriptor.newBuilder()
+          .setFamilyName(ByteStringer.wrap(entry.getKey()))
+          .setStoreHomeDir(Bytes.toString(entry.getKey())); //relative to region
+      if (entry.getValue() != null) {
+        for (Path path : entry.getValue()) {
+          builder.addFlushOutput(path.getName());
+        }
+      }
+      desc.addStoreFlushes(builder);
+    }
+    return desc.build();
+  }
+
   /**
    * Return short version of Message toString'd, shorter than TextFormat#shortDebugString.
    * Tries to NOT print out data both because it can be big but also so we do not have data in our
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java
index c6d6a190aa7..19f5690c4cb 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java
@@ -3674,19 +3674,35 @@ public final class WALProtos {
     // repeated string compaction_input = 4;
     /**
      * <code>repeated string compaction_input = 4;</code>
+     *
+     * <pre>
+     * relative to store dir
+     * </pre>
*/ java.util.List getCompactionInputList(); /** * repeated string compaction_input = 4; + * + *
+     * relative to store dir
+     * 
*/ int getCompactionInputCount(); /** * repeated string compaction_input = 4; + * + *
+     * relative to store dir
+     * 
*/ java.lang.String getCompactionInput(int index); /** * repeated string compaction_input = 4; + * + *
+     * relative to store dir
+     * 
*/ com.google.protobuf.ByteString getCompactionInputBytes(int index); @@ -3714,14 +3730,26 @@ public final class WALProtos { // required string store_home_dir = 6; /** * required string store_home_dir = 6; + * + *
+     * relative to region dir
+     * 
*/ boolean hasStoreHomeDir(); /** * required string store_home_dir = 6; + * + *
+     * relative to region dir
+     * 
*/ java.lang.String getStoreHomeDir(); /** * required string store_home_dir = 6; + * + *
+     * relative to region dir
+     * 
*/ com.google.protobuf.ByteString getStoreHomeDirBytes(); @@ -3951,6 +3979,10 @@ public final class WALProtos { private com.google.protobuf.LazyStringList compactionInput_; /** * repeated string compaction_input = 4; + * + *
+     * relative to store dir
+     * 
*/ public java.util.List getCompactionInputList() { @@ -3958,18 +3990,30 @@ public final class WALProtos { } /** * repeated string compaction_input = 4; + * + *
+     * relative to store dir
+     * 
*/ public int getCompactionInputCount() { return compactionInput_.size(); } /** * repeated string compaction_input = 4; + * + *
+     * relative to store dir
+     * 
*/ public java.lang.String getCompactionInput(int index) { return compactionInput_.get(index); } /** * repeated string compaction_input = 4; + * + *
+     * relative to store dir
+     * 
*/ public com.google.protobuf.ByteString getCompactionInputBytes(int index) { @@ -4011,12 +4055,20 @@ public final class WALProtos { private java.lang.Object storeHomeDir_; /** * required string store_home_dir = 6; + * + *
+     * relative to region dir
+     * 
*/ public boolean hasStoreHomeDir() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required string store_home_dir = 6; + * + *
+     * relative to region dir
+     * 
*/ public java.lang.String getStoreHomeDir() { java.lang.Object ref = storeHomeDir_; @@ -4034,6 +4086,10 @@ public final class WALProtos { } /** * required string store_home_dir = 6; + * + *
+     * relative to region dir
+     * 
*/ public com.google.protobuf.ByteString getStoreHomeDirBytes() { @@ -4692,6 +4748,10 @@ public final class WALProtos { } /** * repeated string compaction_input = 4; + * + *
+       * relative to store dir
+       * 
*/ public java.util.List getCompactionInputList() { @@ -4699,18 +4759,30 @@ public final class WALProtos { } /** * repeated string compaction_input = 4; + * + *
+       * relative to store dir
+       * 
*/ public int getCompactionInputCount() { return compactionInput_.size(); } /** * repeated string compaction_input = 4; + * + *
+       * relative to store dir
+       * 
*/ public java.lang.String getCompactionInput(int index) { return compactionInput_.get(index); } /** * repeated string compaction_input = 4; + * + *
+       * relative to store dir
+       * 
*/ public com.google.protobuf.ByteString getCompactionInputBytes(int index) { @@ -4718,6 +4790,10 @@ public final class WALProtos { } /** * repeated string compaction_input = 4; + * + *
+       * relative to store dir
+       * 
*/ public Builder setCompactionInput( int index, java.lang.String value) { @@ -4731,6 +4807,10 @@ public final class WALProtos { } /** * repeated string compaction_input = 4; + * + *
+       * relative to store dir
+       * 
*/ public Builder addCompactionInput( java.lang.String value) { @@ -4744,6 +4824,10 @@ public final class WALProtos { } /** * repeated string compaction_input = 4; + * + *
+       * relative to store dir
+       * 
*/ public Builder addAllCompactionInput( java.lang.Iterable values) { @@ -4754,6 +4838,10 @@ public final class WALProtos { } /** * repeated string compaction_input = 4; + * + *
+       * relative to store dir
+       * 
*/ public Builder clearCompactionInput() { compactionInput_ = com.google.protobuf.LazyStringArrayList.EMPTY; @@ -4763,6 +4851,10 @@ public final class WALProtos { } /** * repeated string compaction_input = 4; + * + *
+       * relative to store dir
+       * 
*/ public Builder addCompactionInputBytes( com.google.protobuf.ByteString value) { @@ -4872,12 +4964,20 @@ public final class WALProtos { private java.lang.Object storeHomeDir_ = ""; /** * required string store_home_dir = 6; + * + *
+       * relative to region dir
+       * 
*/ public boolean hasStoreHomeDir() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * required string store_home_dir = 6; + * + *
+       * relative to region dir
+       * 
*/ public java.lang.String getStoreHomeDir() { java.lang.Object ref = storeHomeDir_; @@ -4892,6 +4992,10 @@ public final class WALProtos { } /** * required string store_home_dir = 6; + * + *
+       * relative to region dir
+       * 
*/ public com.google.protobuf.ByteString getStoreHomeDirBytes() { @@ -4908,6 +5012,10 @@ public final class WALProtos { } /** * required string store_home_dir = 6; + * + *
+       * relative to region dir
+       * 
*/ public Builder setStoreHomeDir( java.lang.String value) { @@ -4921,6 +5029,10 @@ public final class WALProtos { } /** * required string store_home_dir = 6; + * + *
+       * relative to region dir
+       * 
*/ public Builder clearStoreHomeDir() { bitField0_ = (bitField0_ & ~0x00000020); @@ -4930,6 +5042,10 @@ public final class WALProtos { } /** * required string store_home_dir = 6; + * + *
+       * relative to region dir
+       * 
*/ public Builder setStoreHomeDirBytes( com.google.protobuf.ByteString value) { @@ -5005,6 +5121,2155 @@ public final class WALProtos { // @@protoc_insertion_point(class_scope:CompactionDescriptor) } + public interface FlushDescriptorOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .FlushDescriptor.FlushAction action = 1; + /** + * required .FlushDescriptor.FlushAction action = 1; + */ + boolean hasAction(); + /** + * required .FlushDescriptor.FlushAction action = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction getAction(); + + // required bytes table_name = 2; + /** + * required bytes table_name = 2; + */ + boolean hasTableName(); + /** + * required bytes table_name = 2; + */ + com.google.protobuf.ByteString getTableName(); + + // required bytes encoded_region_name = 3; + /** + * required bytes encoded_region_name = 3; + */ + boolean hasEncodedRegionName(); + /** + * required bytes encoded_region_name = 3; + */ + com.google.protobuf.ByteString getEncodedRegionName(); + + // optional uint64 flush_sequence_number = 4; + /** + * optional uint64 flush_sequence_number = 4; + */ + boolean hasFlushSequenceNumber(); + /** + * optional uint64 flush_sequence_number = 4; + */ + long getFlushSequenceNumber(); + + // repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + java.util.List + getStoreFlushesList(); + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor getStoreFlushes(int index); + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + int getStoreFlushesCount(); + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + java.util.List + getStoreFlushesOrBuilderList(); + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptorOrBuilder getStoreFlushesOrBuilder( + int index); + } + /** + * Protobuf type {@code FlushDescriptor} + * + *
+   **
+   * Special WAL entry to hold all related to a flush.
+   * 
+ */ + public static final class FlushDescriptor extends + com.google.protobuf.GeneratedMessage + implements FlushDescriptorOrBuilder { + // Use FlushDescriptor.newBuilder() to construct. + private FlushDescriptor(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private FlushDescriptor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final FlushDescriptor defaultInstance; + public static FlushDescriptor getDefaultInstance() { + return defaultInstance; + } + + public FlushDescriptor getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private FlushDescriptor( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction value = org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + action_ = value; + } + break; + } + case 18: { + bitField0_ |= 0x00000002; + tableName_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + encodedRegionName_ = input.readBytes(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + flushSequenceNumber_ = input.readUInt64(); + break; + } + case 42: { + if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + storeFlushes_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000010; + } + storeFlushes_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + storeFlushes_ = java.util.Collections.unmodifiableList(storeFlushes_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.internal_static_FlushDescriptor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.internal_static_FlushDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.class, 
org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public FlushDescriptor parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new FlushDescriptor(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + /** + * Protobuf enum {@code FlushDescriptor.FlushAction} + */ + public enum FlushAction + implements com.google.protobuf.ProtocolMessageEnum { + /** + * START_FLUSH = 0; + */ + START_FLUSH(0, 0), + /** + * COMMIT_FLUSH = 1; + */ + COMMIT_FLUSH(1, 1), + /** + * ABORT_FLUSH = 2; + */ + ABORT_FLUSH(2, 2), + ; + + /** + * START_FLUSH = 0; + */ + public static final int START_FLUSH_VALUE = 0; + /** + * COMMIT_FLUSH = 1; + */ + public static final int COMMIT_FLUSH_VALUE = 1; + /** + * ABORT_FLUSH = 2; + */ + public static final int ABORT_FLUSH_VALUE = 2; + + + public final int getNumber() { return value; } + + public static FlushAction valueOf(int value) { + switch (value) { + case 0: return START_FLUSH; + case 1: return COMMIT_FLUSH; + case 2: return ABORT_FLUSH; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public FlushAction findValueByNumber(int number) { + return FlushAction.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.getDescriptor().getEnumTypes().get(0); + } + + private static final FlushAction[] VALUES = values(); + + public static FlushAction valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private FlushAction(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:FlushDescriptor.FlushAction) + } + + public interface StoreFlushDescriptorOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bytes family_name = 1; + /** + * required bytes family_name = 1; + */ + boolean hasFamilyName(); + /** + * required bytes family_name = 1; + */ + com.google.protobuf.ByteString getFamilyName(); + + // required string store_home_dir = 2; + /** + * required string store_home_dir = 2; + * + *
+       *relative to region dir
+       * 
+ */ + boolean hasStoreHomeDir(); + /** + * required string store_home_dir = 2; + * + *
+       *relative to region dir
+       * 
+ */ + java.lang.String getStoreHomeDir(); + /** + * required string store_home_dir = 2; + * + *
+       *relative to region dir
+       * 
+ */ + com.google.protobuf.ByteString + getStoreHomeDirBytes(); + + // repeated string flush_output = 3; + /** + * repeated string flush_output = 3; + * + *
+       * relative to store dir (if this is a COMMIT_FLUSH)
+       * 
+ */ + java.util.List + getFlushOutputList(); + /** + * repeated string flush_output = 3; + * + *
+       * relative to store dir (if this is a COMMIT_FLUSH)
+       * 
+ */ + int getFlushOutputCount(); + /** + * repeated string flush_output = 3; + * + *
+       * relative to store dir (if this is a COMMIT_FLUSH)
+       * 
+ */ + java.lang.String getFlushOutput(int index); + /** + * repeated string flush_output = 3; + * + *
+       * relative to store dir (if this is a COMMIT_FLUSH)
+       * 
+ */ + com.google.protobuf.ByteString + getFlushOutputBytes(int index); + } + /** + * Protobuf type {@code FlushDescriptor.StoreFlushDescriptor} + */ + public static final class StoreFlushDescriptor extends + com.google.protobuf.GeneratedMessage + implements StoreFlushDescriptorOrBuilder { + // Use StoreFlushDescriptor.newBuilder() to construct. + private StoreFlushDescriptor(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private StoreFlushDescriptor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final StoreFlushDescriptor defaultInstance; + public static StoreFlushDescriptor getDefaultInstance() { + return defaultInstance; + } + + public StoreFlushDescriptor getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private StoreFlushDescriptor( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + familyName_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + storeHomeDir_ = input.readBytes(); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + flushOutput_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000004; + } + flushOutput_.add(input.readBytes()); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + flushOutput_ = new com.google.protobuf.UnmodifiableLazyStringList(flushOutput_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.internal_static_FlushDescriptor_StoreFlushDescriptor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.internal_static_FlushDescriptor_StoreFlushDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public StoreFlushDescriptor parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StoreFlushDescriptor(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bytes family_name = 1; + public static final int FAMILY_NAME_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString familyName_; + /** + * required bytes family_name = 1; + */ + public boolean hasFamilyName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes family_name = 1; + */ + public com.google.protobuf.ByteString getFamilyName() { + return familyName_; + } + + // required string store_home_dir = 2; + public static final int STORE_HOME_DIR_FIELD_NUMBER = 2; + private java.lang.Object storeHomeDir_; + /** + * required string store_home_dir = 2; + * + *
+       *relative to region dir
+       * 
+ */ + public boolean hasStoreHomeDir() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string store_home_dir = 2; + * + *
+       *relative to region dir
+       * 
+ */ + public java.lang.String getStoreHomeDir() { + java.lang.Object ref = storeHomeDir_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + storeHomeDir_ = s; + } + return s; + } + } + /** + * required string store_home_dir = 2; + * + *
+       *relative to region dir
+       * 
+ */ + public com.google.protobuf.ByteString + getStoreHomeDirBytes() { + java.lang.Object ref = storeHomeDir_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + storeHomeDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated string flush_output = 3; + public static final int FLUSH_OUTPUT_FIELD_NUMBER = 3; + private com.google.protobuf.LazyStringList flushOutput_; + /** + * repeated string flush_output = 3; + * + *
+       * relative to store dir (if this is a COMMIT_FLUSH)
+       * 
+ */ + public java.util.List + getFlushOutputList() { + return flushOutput_; + } + /** + * repeated string flush_output = 3; + * + *
+       * relative to store dir (if this is a COMMIT_FLUSH)
+       * 
+ */ + public int getFlushOutputCount() { + return flushOutput_.size(); + } + /** + * repeated string flush_output = 3; + * + *
+       * relative to store dir (if this is a COMMIT_FLUSH)
+       * 
+ */ + public java.lang.String getFlushOutput(int index) { + return flushOutput_.get(index); + } + /** + * repeated string flush_output = 3; + * + *
+       * relative to store dir (if this is a COMMIT_FLUSH)
+       * 
+ */ + public com.google.protobuf.ByteString + getFlushOutputBytes(int index) { + return flushOutput_.getByteString(index); + } + + private void initFields() { + familyName_ = com.google.protobuf.ByteString.EMPTY; + storeHomeDir_ = ""; + flushOutput_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasFamilyName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasStoreHomeDir()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, familyName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getStoreHomeDirBytes()); + } + for (int i = 0; i < flushOutput_.size(); i++) { + output.writeBytes(3, flushOutput_.getByteString(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, familyName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getStoreHomeDirBytes()); + } + { + int dataSize = 0; + for (int i = 0; i < flushOutput_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(flushOutput_.getByteString(i)); + } + size += dataSize; + size += 1 * getFlushOutputList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor other = (org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor) obj; + + boolean result = true; + result = result && (hasFamilyName() == other.hasFamilyName()); + if (hasFamilyName()) { + result = result && getFamilyName() + .equals(other.getFamilyName()); + } + result = result && (hasStoreHomeDir() == other.hasStoreHomeDir()); + if (hasStoreHomeDir()) { + result = result && getStoreHomeDir() + .equals(other.getStoreHomeDir()); + } + result = result && getFlushOutputList() + .equals(other.getFlushOutputList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasFamilyName()) { + hash = (37 * hash) + FAMILY_NAME_FIELD_NUMBER; + hash = (53 * hash) + getFamilyName().hashCode(); + } + if (hasStoreHomeDir()) { + 
hash = (37 * hash) + STORE_HOME_DIR_FIELD_NUMBER; + hash = (53 * hash) + getStoreHomeDir().hashCode(); + } + if (getFlushOutputCount() > 0) { + hash = (37 * hash) + FLUSH_OUTPUT_FIELD_NUMBER; + hash = (53 * hash) + getFlushOutputList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code FlushDescriptor.StoreFlushDescriptor} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.internal_static_FlushDescriptor_StoreFlushDescriptor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.internal_static_FlushDescriptor_StoreFlushDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + familyName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + storeHomeDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + flushOutput_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.internal_static_FlushDescriptor_StoreFlushDescriptor_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor build() { + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor result = new org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.familyName_ = familyName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.storeHomeDir_ = storeHomeDir_; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + flushOutput_ = new 
com.google.protobuf.UnmodifiableLazyStringList( + flushOutput_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.flushOutput_ = flushOutput_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor.getDefaultInstance()) return this; + if (other.hasFamilyName()) { + setFamilyName(other.getFamilyName()); + } + if (other.hasStoreHomeDir()) { + bitField0_ |= 0x00000002; + storeHomeDir_ = other.storeHomeDir_; + onChanged(); + } + if (!other.flushOutput_.isEmpty()) { + if (flushOutput_.isEmpty()) { + flushOutput_ = other.flushOutput_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureFlushOutputIsMutable(); + flushOutput_.addAll(other.flushOutput_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasFamilyName()) { + + return false; + } + if (!hasStoreHomeDir()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bytes family_name = 1; + private com.google.protobuf.ByteString familyName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes family_name = 1; + */ + public boolean hasFamilyName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes family_name = 1; + */ + public com.google.protobuf.ByteString getFamilyName() { + return familyName_; + } + /** + * required bytes family_name = 1; + */ + public Builder setFamilyName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + familyName_ = value; + onChanged(); + return this; + } + /** + * required bytes family_name = 1; + */ + public Builder clearFamilyName() { + bitField0_ = (bitField0_ & ~0x00000001); + familyName_ = getDefaultInstance().getFamilyName(); + onChanged(); + return this; + } + + // required string store_home_dir = 2; + private java.lang.Object storeHomeDir_ = ""; + /** + * required string store_home_dir = 2; + * + *
+         *relative to region dir
+         * 
+ */ + public boolean hasStoreHomeDir() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string store_home_dir = 2; + * + *
+         *relative to region dir
+         * 
+ */ + public java.lang.String getStoreHomeDir() { + java.lang.Object ref = storeHomeDir_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + storeHomeDir_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string store_home_dir = 2; + * + *
+         *relative to region dir
+         * 
+ */ + public com.google.protobuf.ByteString + getStoreHomeDirBytes() { + java.lang.Object ref = storeHomeDir_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + storeHomeDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string store_home_dir = 2; + * + *
+         *relative to region dir
+         * 
+ */ + public Builder setStoreHomeDir( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + storeHomeDir_ = value; + onChanged(); + return this; + } + /** + * required string store_home_dir = 2; + * + *
+         *relative to region dir
+         * 
+ */ + public Builder clearStoreHomeDir() { + bitField0_ = (bitField0_ & ~0x00000002); + storeHomeDir_ = getDefaultInstance().getStoreHomeDir(); + onChanged(); + return this; + } + /** + * required string store_home_dir = 2; + * + *
+         *relative to region dir
+         * 
+ */ + public Builder setStoreHomeDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + storeHomeDir_ = value; + onChanged(); + return this; + } + + // repeated string flush_output = 3; + private com.google.protobuf.LazyStringList flushOutput_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureFlushOutputIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + flushOutput_ = new com.google.protobuf.LazyStringArrayList(flushOutput_); + bitField0_ |= 0x00000004; + } + } + /** + * repeated string flush_output = 3; + * + *
+         * relative to store dir (if this is a COMMIT_FLUSH)
+         * 
+ */ + public java.util.List + getFlushOutputList() { + return java.util.Collections.unmodifiableList(flushOutput_); + } + /** + * repeated string flush_output = 3; + * + *
+         * relative to store dir (if this is a COMMIT_FLUSH)
+         * 
+ */ + public int getFlushOutputCount() { + return flushOutput_.size(); + } + /** + * repeated string flush_output = 3; + * + *
+         * relative to store dir (if this is a COMMIT_FLUSH)
+         * 
+ */ + public java.lang.String getFlushOutput(int index) { + return flushOutput_.get(index); + } + /** + * repeated string flush_output = 3; + * + *
+         * relative to store dir (if this is a COMMIT_FLUSH)
+         * 
+ */ + public com.google.protobuf.ByteString + getFlushOutputBytes(int index) { + return flushOutput_.getByteString(index); + } + /** + * repeated string flush_output = 3; + * + *
+         * relative to store dir (if this is a COMMIT_FLUSH)
+         * 
+ */ + public Builder setFlushOutput( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFlushOutputIsMutable(); + flushOutput_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string flush_output = 3; + * + *
+         * relative to store dir (if this is a COMMIT_FLUSH)
+         * 
+ */ + public Builder addFlushOutput( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFlushOutputIsMutable(); + flushOutput_.add(value); + onChanged(); + return this; + } + /** + * repeated string flush_output = 3; + * + *
+         * relative to store dir (if this is a COMMIT_FLUSH)
+         * 
+ */ + public Builder addAllFlushOutput( + java.lang.Iterable values) { + ensureFlushOutputIsMutable(); + super.addAll(values, flushOutput_); + onChanged(); + return this; + } + /** + * repeated string flush_output = 3; + * + *
+         * relative to store dir (if this is a COMMIT_FLUSH)
+         * 
+ */ + public Builder clearFlushOutput() { + flushOutput_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + /** + * repeated string flush_output = 3; + * + *
+         * relative to store dir (if this is a COMMIT_FLUSH)
+         * 
+ */ + public Builder addFlushOutputBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFlushOutputIsMutable(); + flushOutput_.add(value); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:FlushDescriptor.StoreFlushDescriptor) + } + + static { + defaultInstance = new StoreFlushDescriptor(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:FlushDescriptor.StoreFlushDescriptor) + } + + private int bitField0_; + // required .FlushDescriptor.FlushAction action = 1; + public static final int ACTION_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction action_; + /** + * required .FlushDescriptor.FlushAction action = 1; + */ + public boolean hasAction() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .FlushDescriptor.FlushAction action = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction getAction() { + return action_; + } + + // required bytes table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString tableName_; + /** + * required bytes table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bytes table_name = 2; + */ + public com.google.protobuf.ByteString getTableName() { + return tableName_; + } + + // required bytes encoded_region_name = 3; + public static final int ENCODED_REGION_NAME_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString encodedRegionName_; + /** + * required bytes encoded_region_name = 3; + */ + public boolean hasEncodedRegionName() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bytes encoded_region_name = 3; + */ + public com.google.protobuf.ByteString getEncodedRegionName() { + return encodedRegionName_; + } + + // optional uint64 flush_sequence_number = 4; + public static final int FLUSH_SEQUENCE_NUMBER_FIELD_NUMBER = 4; + private long flushSequenceNumber_; + /** + * optional uint64 flush_sequence_number = 4; + */ + public boolean hasFlushSequenceNumber() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional uint64 flush_sequence_number = 4; + */ + public long getFlushSequenceNumber() { + return flushSequenceNumber_; + } + + // repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + public static final int STORE_FLUSHES_FIELD_NUMBER = 5; + private java.util.List storeFlushes_; + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public java.util.List getStoreFlushesList() { + return storeFlushes_; + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public java.util.List + getStoreFlushesOrBuilderList() { + return storeFlushes_; + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public int getStoreFlushesCount() { + return storeFlushes_.size(); + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor getStoreFlushes(int index) { + return storeFlushes_.get(index); + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptorOrBuilder getStoreFlushesOrBuilder( + int index) { 
+ return storeFlushes_.get(index); + } + + private void initFields() { + action_ = org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction.START_FLUSH; + tableName_ = com.google.protobuf.ByteString.EMPTY; + encodedRegionName_ = com.google.protobuf.ByteString.EMPTY; + flushSequenceNumber_ = 0L; + storeFlushes_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasAction()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasEncodedRegionName()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getStoreFlushesCount(); i++) { + if (!getStoreFlushes(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, action_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, encodedRegionName_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt64(4, flushSequenceNumber_); + } + for (int i = 0; i < storeFlushes_.size(); i++) { + output.writeMessage(5, storeFlushes_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, action_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, encodedRegionName_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(4, flushSequenceNumber_); + } + for (int i = 0; i < storeFlushes_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, storeFlushes_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor other = (org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor) obj; + + boolean result = true; + result = result && (hasAction() == other.hasAction()); + if (hasAction()) { + result = result && + (getAction() == other.getAction()); + } + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + 
.equals(other.getTableName()); + } + result = result && (hasEncodedRegionName() == other.hasEncodedRegionName()); + if (hasEncodedRegionName()) { + result = result && getEncodedRegionName() + .equals(other.getEncodedRegionName()); + } + result = result && (hasFlushSequenceNumber() == other.hasFlushSequenceNumber()); + if (hasFlushSequenceNumber()) { + result = result && (getFlushSequenceNumber() + == other.getFlushSequenceNumber()); + } + result = result && getStoreFlushesList() + .equals(other.getStoreFlushesList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasAction()) { + hash = (37 * hash) + ACTION_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getAction()); + } + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasEncodedRegionName()) { + hash = (37 * hash) + ENCODED_REGION_NAME_FIELD_NUMBER; + hash = (53 * hash) + getEncodedRegionName().hashCode(); + } + if (hasFlushSequenceNumber()) { + hash = (37 * hash) + FLUSH_SEQUENCE_NUMBER_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getFlushSequenceNumber()); + } + if (getStoreFlushesCount() > 0) { + hash = (37 * hash) + STORE_FLUSHES_FIELD_NUMBER; + hash = (53 * hash) + getStoreFlushesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code FlushDescriptor} + * + *
+     **
+     * Special WAL entry to hold all related to a flush.
+     * 
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.internal_static_FlushDescriptor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.internal_static_FlushDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getStoreFlushesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + action_ = org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction.START_FLUSH; + bitField0_ = (bitField0_ & ~0x00000001); + tableName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + encodedRegionName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + flushSequenceNumber_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + if (storeFlushesBuilder_ == null) { + storeFlushes_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + } else { + storeFlushesBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.internal_static_FlushDescriptor_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor build() { + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor result = new org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.action_ = action_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.tableName_ = tableName_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.encodedRegionName_ = encodedRegionName_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + 
to_bitField0_ |= 0x00000008; + } + result.flushSequenceNumber_ = flushSequenceNumber_; + if (storeFlushesBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010)) { + storeFlushes_ = java.util.Collections.unmodifiableList(storeFlushes_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.storeFlushes_ = storeFlushes_; + } else { + result.storeFlushes_ = storeFlushesBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.getDefaultInstance()) return this; + if (other.hasAction()) { + setAction(other.getAction()); + } + if (other.hasTableName()) { + setTableName(other.getTableName()); + } + if (other.hasEncodedRegionName()) { + setEncodedRegionName(other.getEncodedRegionName()); + } + if (other.hasFlushSequenceNumber()) { + setFlushSequenceNumber(other.getFlushSequenceNumber()); + } + if (storeFlushesBuilder_ == null) { + if (!other.storeFlushes_.isEmpty()) { + if (storeFlushes_.isEmpty()) { + storeFlushes_ = other.storeFlushes_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureStoreFlushesIsMutable(); + storeFlushes_.addAll(other.storeFlushes_); + } + onChanged(); + } + } else { + if (!other.storeFlushes_.isEmpty()) { + if (storeFlushesBuilder_.isEmpty()) { + storeFlushesBuilder_.dispose(); + storeFlushesBuilder_ = null; + storeFlushes_ = other.storeFlushes_; + bitField0_ = (bitField0_ & ~0x00000010); + storeFlushesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getStoreFlushesFieldBuilder() : null; + } else { + storeFlushesBuilder_.addAllMessages(other.storeFlushes_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasAction()) { + + return false; + } + if (!hasTableName()) { + + return false; + } + if (!hasEncodedRegionName()) { + + return false; + } + for (int i = 0; i < getStoreFlushesCount(); i++) { + if (!getStoreFlushes(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .FlushDescriptor.FlushAction action = 1; + private org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction action_ = org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction.START_FLUSH; + /** + * required .FlushDescriptor.FlushAction action = 1; + */ + public boolean hasAction() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .FlushDescriptor.FlushAction action = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction getAction() { + return action_; + } + /** + * required .FlushDescriptor.FlushAction action = 1; + */ + public Builder setAction(org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + action_ = value; + onChanged(); + return this; + } + /** + * required .FlushDescriptor.FlushAction action = 1; + */ + public Builder clearAction() { + bitField0_ = (bitField0_ & ~0x00000001); + action_ = org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction.START_FLUSH; + onChanged(); + return this; + } + + // required bytes table_name = 2; + private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bytes table_name = 2; + */ + public com.google.protobuf.ByteString getTableName() { + return tableName_; + } + /** + * required bytes table_name = 2; + */ + public Builder setTableName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + tableName_ = value; + onChanged(); + return this; + } + /** + * required bytes table_name = 2; + */ + public Builder clearTableName() { + bitField0_ = (bitField0_ & ~0x00000002); + tableName_ = getDefaultInstance().getTableName(); + onChanged(); + return this; + } + + // required bytes encoded_region_name = 3; + private com.google.protobuf.ByteString encodedRegionName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes encoded_region_name = 3; + */ + public boolean hasEncodedRegionName() { + return ((bitField0_ & 0x00000004) == 0x00000004); + 
} + /** + * required bytes encoded_region_name = 3; + */ + public com.google.protobuf.ByteString getEncodedRegionName() { + return encodedRegionName_; + } + /** + * required bytes encoded_region_name = 3; + */ + public Builder setEncodedRegionName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + encodedRegionName_ = value; + onChanged(); + return this; + } + /** + * required bytes encoded_region_name = 3; + */ + public Builder clearEncodedRegionName() { + bitField0_ = (bitField0_ & ~0x00000004); + encodedRegionName_ = getDefaultInstance().getEncodedRegionName(); + onChanged(); + return this; + } + + // optional uint64 flush_sequence_number = 4; + private long flushSequenceNumber_ ; + /** + * optional uint64 flush_sequence_number = 4; + */ + public boolean hasFlushSequenceNumber() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional uint64 flush_sequence_number = 4; + */ + public long getFlushSequenceNumber() { + return flushSequenceNumber_; + } + /** + * optional uint64 flush_sequence_number = 4; + */ + public Builder setFlushSequenceNumber(long value) { + bitField0_ |= 0x00000008; + flushSequenceNumber_ = value; + onChanged(); + return this; + } + /** + * optional uint64 flush_sequence_number = 4; + */ + public Builder clearFlushSequenceNumber() { + bitField0_ = (bitField0_ & ~0x00000008); + flushSequenceNumber_ = 0L; + onChanged(); + return this; + } + + // repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + private java.util.List storeFlushes_ = + java.util.Collections.emptyList(); + private void ensureStoreFlushesIsMutable() { + if (!((bitField0_ & 0x00000010) == 0x00000010)) { + storeFlushes_ = new java.util.ArrayList(storeFlushes_); + bitField0_ |= 0x00000010; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor, org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptorOrBuilder> storeFlushesBuilder_; + + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public java.util.List getStoreFlushesList() { + if (storeFlushesBuilder_ == null) { + return java.util.Collections.unmodifiableList(storeFlushes_); + } else { + return storeFlushesBuilder_.getMessageList(); + } + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public int getStoreFlushesCount() { + if (storeFlushesBuilder_ == null) { + return storeFlushes_.size(); + } else { + return storeFlushesBuilder_.getCount(); + } + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor getStoreFlushes(int index) { + if (storeFlushesBuilder_ == null) { + return storeFlushes_.get(index); + } else { + return storeFlushesBuilder_.getMessage(index); + } + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public Builder setStoreFlushes( + int index, org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor value) { + if (storeFlushesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreFlushesIsMutable(); + storeFlushes_.set(index, value); + onChanged(); + } else { + 
storeFlushesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public Builder setStoreFlushes( + int index, org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor.Builder builderForValue) { + if (storeFlushesBuilder_ == null) { + ensureStoreFlushesIsMutable(); + storeFlushes_.set(index, builderForValue.build()); + onChanged(); + } else { + storeFlushesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public Builder addStoreFlushes(org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor value) { + if (storeFlushesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreFlushesIsMutable(); + storeFlushes_.add(value); + onChanged(); + } else { + storeFlushesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public Builder addStoreFlushes( + int index, org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor value) { + if (storeFlushesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreFlushesIsMutable(); + storeFlushes_.add(index, value); + onChanged(); + } else { + storeFlushesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public Builder addStoreFlushes( + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor.Builder builderForValue) { + if (storeFlushesBuilder_ == null) { + ensureStoreFlushesIsMutable(); + storeFlushes_.add(builderForValue.build()); + onChanged(); + } else { + storeFlushesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public Builder addStoreFlushes( + int index, org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor.Builder builderForValue) { + if (storeFlushesBuilder_ == null) { + ensureStoreFlushesIsMutable(); + storeFlushes_.add(index, builderForValue.build()); + onChanged(); + } else { + storeFlushesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public Builder addAllStoreFlushes( + java.lang.Iterable values) { + if (storeFlushesBuilder_ == null) { + ensureStoreFlushesIsMutable(); + super.addAll(values, storeFlushes_); + onChanged(); + } else { + storeFlushesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public Builder clearStoreFlushes() { + if (storeFlushesBuilder_ == null) { + storeFlushes_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + storeFlushesBuilder_.clear(); + } + return this; + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public Builder removeStoreFlushes(int index) { + if (storeFlushesBuilder_ == null) { + ensureStoreFlushesIsMutable(); + storeFlushes_.remove(index); + onChanged(); + } else { + storeFlushesBuilder_.remove(index); + } + return this; + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + 
public org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor.Builder getStoreFlushesBuilder( + int index) { + return getStoreFlushesFieldBuilder().getBuilder(index); + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptorOrBuilder getStoreFlushesOrBuilder( + int index) { + if (storeFlushesBuilder_ == null) { + return storeFlushes_.get(index); } else { + return storeFlushesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public java.util.List + getStoreFlushesOrBuilderList() { + if (storeFlushesBuilder_ != null) { + return storeFlushesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(storeFlushes_); + } + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor.Builder addStoreFlushesBuilder() { + return getStoreFlushesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor.getDefaultInstance()); + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor.Builder addStoreFlushesBuilder( + int index) { + return getStoreFlushesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor.getDefaultInstance()); + } + /** + * repeated .FlushDescriptor.StoreFlushDescriptor store_flushes = 5; + */ + public java.util.List + getStoreFlushesBuilderList() { + return getStoreFlushesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor, org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptorOrBuilder> + getStoreFlushesFieldBuilder() { + if (storeFlushesBuilder_ == null) { + storeFlushesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor, org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptorOrBuilder>( + storeFlushes_, + ((bitField0_ & 0x00000010) == 0x00000010), + getParentForChildren(), + isClean()); + storeFlushes_ = null; + } + return storeFlushesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:FlushDescriptor) + } + + static { + defaultInstance = new FlushDescriptor(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:FlushDescriptor) + } + public interface WALTrailerOrBuilder extends com.google.protobuf.MessageOrBuilder { } @@ -5375,6 +7640,16 @@ public final class WALProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_CompactionDescriptor_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_FlushDescriptor_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_FlushDescriptor_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_FlushDescriptor_StoreFlushDescriptor_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_FlushDescriptor_StoreFlushDescriptor_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_WALTrailer_descriptor; private static @@ -5405,11 +7680,20 @@ public final class WALProtos { "coded_region_name\030\002 \002(\014\022\023\n\013family_name\030\003" + " \002(\014\022\030\n\020compaction_input\030\004 \003(\t\022\031\n\021compac" + "tion_output\030\005 \003(\t\022\026\n\016store_home_dir\030\006 \002(" + - "\t\022\023\n\013region_name\030\007 \001(\014\"\014\n\nWALTrailer*F\n\t" + - "ScopeType\022\033\n\027REPLICATION_SCOPE_LOCAL\020\000\022\034" + - "\n\030REPLICATION_SCOPE_GLOBAL\020\001B?\n*org.apac" + - "he.hadoop.hbase.protobuf.generatedB\tWALP", - "rotosH\001\210\001\000\240\001\001" + "\t\022\023\n\013region_name\030\007 \001(\014\"\353\002\n\017FlushDescript" + + "or\022,\n\006action\030\001 \002(\0162\034.FlushDescriptor.Flu" + + "shAction\022\022\n\ntable_name\030\002 \002(\014\022\033\n\023encoded_" + + "region_name\030\003 \002(\014\022\035\n\025flush_sequence_numb", + "er\030\004 \001(\004\022<\n\rstore_flushes\030\005 \003(\0132%.FlushD" + + "escriptor.StoreFlushDescriptor\032Y\n\024StoreF" + + "lushDescriptor\022\023\n\013family_name\030\001 \002(\014\022\026\n\016s" + + "tore_home_dir\030\002 \002(\t\022\024\n\014flush_output\030\003 \003(" + + "\t\"A\n\013FlushAction\022\017\n\013START_FLUSH\020\000\022\020\n\014COM" + + "MIT_FLUSH\020\001\022\017\n\013ABORT_FLUSH\020\002\"\014\n\nWALTrail" + + "er*F\n\tScopeType\022\033\n\027REPLICATION_SCOPE_LOC" + + "AL\020\000\022\034\n\030REPLICATION_SCOPE_GLOBAL\020\001B?\n*or" + + "g.apache.hadoop.hbase.protobuf.generated" + + "B\tWALProtosH\001\210\001\000\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -5440,8 +7724,20 @@ public final class WALProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CompactionDescriptor_descriptor, new java.lang.String[] { "TableName", "EncodedRegionName", "FamilyName", "CompactionInput", "CompactionOutput", "StoreHomeDir", "RegionName", }); - internal_static_WALTrailer_descriptor = + internal_static_FlushDescriptor_descriptor = getDescriptor().getMessageTypes().get(4); + internal_static_FlushDescriptor_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_FlushDescriptor_descriptor, + new java.lang.String[] { "Action", "TableName", "EncodedRegionName", "FlushSequenceNumber", "StoreFlushes", }); + internal_static_FlushDescriptor_StoreFlushDescriptor_descriptor = + internal_static_FlushDescriptor_descriptor.getNestedTypes().get(0); + internal_static_FlushDescriptor_StoreFlushDescriptor_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_FlushDescriptor_StoreFlushDescriptor_descriptor, + new java.lang.String[] { "FamilyName", "StoreHomeDir", "FlushOutput", }); + internal_static_WALTrailer_descriptor = + getDescriptor().getMessageTypes().get(5); internal_static_WALTrailer_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_WALTrailer_descriptor, diff --git 
a/hbase-protocol/src/main/protobuf/WAL.proto b/hbase-protocol/src/main/protobuf/WAL.proto index 88e94f4af1d..f14d5f4343f 100644 --- a/hbase-protocol/src/main/protobuf/WAL.proto +++ b/hbase-protocol/src/main/protobuf/WAL.proto @@ -89,12 +89,35 @@ message CompactionDescriptor { required bytes table_name = 1; // TODO: WALKey already stores these, might remove required bytes encoded_region_name = 2; required bytes family_name = 3; - repeated string compaction_input = 4; + repeated string compaction_input = 4; // relative to store dir repeated string compaction_output = 5; - required string store_home_dir = 6; + required string store_home_dir = 6; // relative to region dir optional bytes region_name = 7; // full region name } +/** + * Special WAL entry to hold all related to a flush. + */ +message FlushDescriptor { + enum FlushAction { + START_FLUSH = 0; + COMMIT_FLUSH = 1; + ABORT_FLUSH = 2; + } + + message StoreFlushDescriptor { + required bytes family_name = 1; + required string store_home_dir = 2; //relative to region dir + repeated string flush_output = 3; // relative to store dir (if this is a COMMIT_FLUSH) + } + + required FlushAction action = 1; + required bytes table_name = 2; + required bytes encoded_region_name = 3; + optional uint64 flush_sequence_number = 4; + repeated StoreFlushDescriptor store_flushes = 5; +} + /** * A trailer that is appended to the end of a properly closed HLog WAL file. * If missing, this is either a legacy or a corrupted WAL file. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 80ca84865c8..fbb1ac0087e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -30,6 +30,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.NavigableMap; @@ -113,10 +114,13 @@ import org.apache.hadoop.hbase.ipc.RpcCallContext; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction; import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl.WriteEntry; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.wal.HLog; @@ -1729,8 +1733,11 @@ public class HRegion implements HeapSize { // , Writable{ status.setStatus("Preparing to flush by snapshotting stores in " + getRegionInfo().getEncodedName()); List storeFlushCtxs = new ArrayList(stores.size()); + TreeMap> committedFiles = new TreeMap>( + Bytes.BYTES_COMPARATOR); long flushSeqId = -1L; + long trxId = 0; try { try { w = mvcc.beginMemstoreInsert(); @@ -1754,12 +1761,39 @@ public class HRegion 
implements HeapSize { // , Writable{ for (Store s : stores.values()) { totalFlushableSize += s.getFlushableSize(); storeFlushCtxs.add(s.createFlushContext(flushSeqId)); + committedFiles.put(s.getFamily().getName(), null); // for writing stores to WAL + } + + // write the snapshot start to WAL + if (wal != null) { + FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH, + getRegionInfo(), flushSeqId, committedFiles); + trxId = HLogUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(), + desc, sequenceId, false); // no sync. Sync is below where we do not hold the updates lock } // Prepare flush (take a snapshot) for (StoreFlushContext flush : storeFlushCtxs) { flush.prepare(); } + } catch (IOException ex) { + if (wal != null) { + if (trxId > 0) { // check whether we have already written START_FLUSH to WAL + try { + FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.ABORT_FLUSH, + getRegionInfo(), flushSeqId, committedFiles); + HLogUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(), + desc, sequenceId, false); + } catch (Throwable t) { + LOG.warn("Received unexpected exception trying to write ABORT_FLUSH marker to WAL:" + + StringUtils.stringifyException(t)); + // ignore this since we will be aborting the RS with DSE. + } + } + // we have called wal.startCacheFlush(), now we have to abort it + wal.abortCacheFlush(this.getRegionInfo().getEncodedNameAsBytes()); + throw ex; // let upper layers deal with it. + } } finally { this.updatesLock.writeLock().unlock(); } @@ -1767,9 +1801,16 @@ public class HRegion implements HeapSize { // , Writable{ ", syncing WAL and waiting on mvcc, flushsize=" + totalFlushableSize; status.setStatus(s); if (LOG.isTraceEnabled()) LOG.trace(s); - // sync unflushed WAL changes when deferred log sync is enabled + // sync unflushed WAL changes // see HBASE-8208 for details - if (wal != null && !shouldSyncLog()) wal.sync(); + if (wal != null) { + try { + wal.sync(); // ensure that flush marker is sync'ed + } catch (IOException ioe) { + LOG.warn("Unexpected exception while log.sync(), ignoring. Exception: " + + StringUtils.stringifyException(ioe)); + } + } // wait for all in-progress transactions to commit to HLog before // we can start the flush. This prevents @@ -1806,16 +1847,27 @@ public class HRegion implements HeapSize { // , Writable{ // Switch snapshot (in memstore) -> new hfile (thus causing // all the store scanners to reset/reseek). + Iterator it = stores.values().iterator(); // stores.values() and storeFlushCtxs have + // same order for (StoreFlushContext flush : storeFlushCtxs) { boolean needsCompaction = flush.commit(status); if (needsCompaction) { compactionRequested = true; } + committedFiles.put(it.next().getFamily().getName(), flush.getCommittedFiles()); } storeFlushCtxs.clear(); // Set down the memstore size by amount of flush. this.addAndGetGlobalMemstoreSize(-totalFlushableSize); + + if (wal != null) { + // write flush marker to WAL. If fail, we should throw DroppedSnapshotException + FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.COMMIT_FLUSH, + getRegionInfo(), flushSeqId, committedFiles); + HLogUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(), + desc, sequenceId, true); + } } catch (Throwable t) { // An exception here means that the snapshot was not persisted. // The hlog needs to be replayed so its content is restored to memstore. @@ -1824,6 +1876,16 @@ public class HRegion implements HeapSize { // , Writable{ // exceptions -- e.g. 
HBASE-659 was about an NPE -- so now we catch // all and sundry. if (wal != null) { + try { + FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.ABORT_FLUSH, + getRegionInfo(), flushSeqId, committedFiles); + HLogUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(), + desc, sequenceId, false); + } catch (Throwable ex) { + LOG.warn("Received unexpected exception trying to write ABORT_FLUSH marker to WAL:" + + StringUtils.stringifyException(ex)); + // ignore this since we will be aborting the RS with DSE. + } wal.abortCacheFlush(this.getRegionInfo().getEncodedNameAsBytes()); } DroppedSnapshotException dse = new DroppedSnapshotException("region: " + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index e059fe815da..3f5729adda5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -2035,6 +2035,7 @@ public class HStore implements Store { private long cacheFlushSeqNum; private MemStoreSnapshot snapshot; private List tempFiles; + private List committedFiles; private StoreFlusherImpl(long cacheFlushSeqNum) { this.cacheFlushSeqNum = cacheFlushSeqNum; @@ -2047,6 +2048,7 @@ public class HStore implements Store { @Override public void prepare() { this.snapshot = memstore.snapshot(); + committedFiles = new ArrayList(1); } @Override @@ -2079,14 +2081,20 @@ public class HStore implements Store { } } - if (HStore.this.getCoprocessorHost() != null) { - for (StoreFile sf : storeFiles) { + for (StoreFile sf : storeFiles) { + if (HStore.this.getCoprocessorHost() != null) { HStore.this.getCoprocessorHost().postFlush(HStore.this, sf); } + committedFiles.add(sf.getPath()); } // Add new file to store files. Clear snapshot too while we have the Store write lock. return HStore.this.updateStorefiles(storeFiles, snapshot.getId()); } + + @Override + public List getCommittedFiles() { + return committedFiles; + } } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java index 193a81166b8..fdf1f1eeb5b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java @@ -19,8 +19,10 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; +import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.monitoring.MonitoredTask; /** @@ -61,4 +63,10 @@ interface StoreFlushContext { * @throws IOException */ boolean commit(MonitoredTask status) throws IOException; + + /** + * Returns the newly committed files from the flush. 
Called only if commit returns true + * @return a list of Paths for new files + */ + List getCommittedFiles(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtil.java index a0707f7d859..2c4652b2d79 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtil.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor; import org.apache.hadoop.hbase.util.FSUtils; import com.google.protobuf.TextFormat; @@ -268,4 +269,19 @@ public class HLogUtil { LOG.trace("Appended compaction marker " + TextFormat.shortDebugString(c)); } } + + /** + * Write a flush marker indicating a start / abort or a complete of a region flush + */ + public static long writeFlushMarker(HLog log, HTableDescriptor htd, HRegionInfo info, + final FlushDescriptor f, AtomicLong sequenceId, boolean sync) throws IOException { + TableName tn = TableName.valueOf(f.getTableName().toByteArray()); + HLogKey key = new HLogKey(info.getEncodedNameAsBytes(), tn); + long trx = log.appendNoSync(htd, info, key, WALEdit.createFlushWALEdit(info, f), sequenceId, false, null); + if (sync) log.sync(trx); + if (LOG.isTraceEnabled()) { + LOG.trace("Appended flush marker " + TextFormat.shortDebugString(f)); + } + return trx; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java index 24d9d6d1730..f684d7dfe30 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java @@ -36,8 +36,10 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.io.Writable; @@ -83,6 +85,8 @@ public class WALEdit implements Writable, HeapSize { public static final byte [] METAFAMILY = Bytes.toBytes("METAFAMILY"); static final byte [] METAROW = Bytes.toBytes("METAROW"); static final byte[] COMPACTION = Bytes.toBytes("HBASE::COMPACTION"); + static final byte [] FLUSH = Bytes.toBytes("HBASE::FLUSH"); + private final int VERSION_2 = -1; private final boolean isReplay; @@ -112,6 +116,10 @@ public class WALEdit implements Writable, HeapSize { return Bytes.equals(METAFAMILY, f); } + public static boolean isMetaEditFamily(Cell cell) { + return CellUtil.matchingFamily(cell, METAFAMILY); + } + /** * @return True when current WALEdit is created by log replay. Replication skips WALEdits from * replay. 
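The next hunk adds the WALEdit helpers (createFlushWALEdit, getFlushDescriptor) that HLogUtil.writeFlushMarker above relies on. A minimal sketch of how a flush-marker edit round-trips through them, assuming an existing HRegionInfo hri and a FlushDescriptor desc (for example built with ProtobufUtil.toFlushDescriptor), inside a method declared to throw IOException:

    // Sketch only: wrap a flush descriptor in a WALEdit, then parse it back out.
    WALEdit edit = WALEdit.createFlushWALEdit(hri, desc);        // METAFAMILY cell with the HBASE::FLUSH qualifier
    Cell cell = edit.getKeyValues().get(0);
    if (WALEdit.isMetaEditFamily(cell)) {
      FlushDescriptor parsed = WALEdit.getFlushDescriptor(cell); // null if the qualifier is not HBASE::FLUSH
      assert parsed != null && parsed.getAction() == desc.getAction();
    }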
@@ -256,6 +264,19 @@ public class WALEdit implements Writable, HeapSize { return sb.toString(); } + public static WALEdit createFlushWALEdit(HRegionInfo hri, FlushDescriptor f) { + KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, FLUSH, + EnvironmentEdgeManager.currentTimeMillis(), f.toByteArray()); + return new WALEdit().add(kv); + } + + public static FlushDescriptor getFlushDescriptor(Cell cell) throws IOException { + if (CellUtil.matchingColumn(cell, METAFAMILY, FLUSH)) { + return FlushDescriptor.parseFrom(cell.getValue()); + } + return null; + } + /** * Create a compacion WALEdit * @param c @@ -264,7 +285,7 @@ public class WALEdit implements Writable, HeapSize { public static WALEdit createCompaction(final HRegionInfo hri, final CompactionDescriptor c) { byte [] pbbytes = c.toByteArray(); KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, COMPACTION, - System.currentTimeMillis(), pbbytes); + EnvironmentEdgeManager.currentTimeMillis(), pbbytes); return new WALEdit().add(kv); //replication scope null so that this won't be replicated } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 15e530aa2da..6e050a088a6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -35,10 +35,12 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.argThat; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import java.io.IOException; import java.io.InterruptedIOException; @@ -111,6 +113,9 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor; import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; import org.apache.hadoop.hbase.regionserver.HRegion.RowLock; import org.apache.hadoop.hbase.regionserver.TestStore.FaultyFileSystem; @@ -136,6 +141,7 @@ import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.mockito.ArgumentMatcher; import org.mockito.Mockito; import com.google.common.collect.Lists; @@ -786,6 +792,228 @@ public class TestHRegion { } } + @Test + public void testFlushMarkers() throws Exception { + // tests that flush markers are written to WAL and handled at recovered edits + String method = name.getMethodName(); + TableName tableName = TableName.valueOf(method); + byte[] family = Bytes.toBytes("family"); + Path logDir = TEST_UTIL.getDataTestDirOnTestFS("testRecoveredEditsIgnoreFlushMarkers.log"); + HLog hlog = HLogFactory.createHLog(FILESYSTEM, logDir, UUID.randomUUID().toString(), + TEST_UTIL.getConfiguration()); + + this.region = initHRegion(tableName.getName(), 
HConstants.EMPTY_START_ROW, + HConstants.EMPTY_END_ROW, method, CONF, false, Durability.USE_DEFAULT, hlog, family); + try { + Path regiondir = region.getRegionFileSystem().getRegionDir(); + FileSystem fs = region.getRegionFileSystem().getFileSystem(); + byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes(); + + long maxSeqId = 3; + long minSeqId = 0; + + for (long i = minSeqId; i < maxSeqId; i++) { + Put put = new Put(Bytes.toBytes(i)); + put.add(family, Bytes.toBytes(i), Bytes.toBytes(i)); + region.put(put); + region.flushcache(); + } + + // this will create a region with 3 files from flush + assertEquals(3, region.getStore(family).getStorefilesCount()); + List storeFiles = new ArrayList(3); + for (StoreFile sf : region.getStore(family).getStorefiles()) { + storeFiles.add(sf.getPath().getName()); + } + + // now verify that the flush markers are written + hlog.close(); + HLog.Reader reader = HLogFactory.createReader(fs, + fs.listStatus(fs.listStatus(logDir)[0].getPath())[0].getPath(), + TEST_UTIL.getConfiguration()); + + List flushDescriptors = new ArrayList(); + long lastFlushSeqId = -1; + while (true) { + HLog.Entry entry = reader.next(); + if (entry == null) { + break; + } + Cell cell = entry.getEdit().getKeyValues().get(0); + if (WALEdit.isMetaEditFamily(cell)) { + FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(cell); + assertNotNull(flushDesc); + assertArrayEquals(tableName.getName(), flushDesc.getTableName().toByteArray()); + if (flushDesc.getAction() == FlushAction.START_FLUSH) { + assertTrue(flushDesc.getFlushSequenceNumber() > lastFlushSeqId); + } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) { + assertTrue(flushDesc.getFlushSequenceNumber() == lastFlushSeqId); + } + lastFlushSeqId = flushDesc.getFlushSequenceNumber(); + assertArrayEquals(regionName, flushDesc.getEncodedRegionName().toByteArray()); + assertEquals(1, flushDesc.getStoreFlushesCount()); //only one store + StoreFlushDescriptor storeFlushDesc = flushDesc.getStoreFlushes(0); + assertArrayEquals(family, storeFlushDesc.getFamilyName().toByteArray()); + assertEquals("family", storeFlushDesc.getStoreHomeDir()); + if (flushDesc.getAction() == FlushAction.START_FLUSH) { + assertEquals(0, storeFlushDesc.getFlushOutputCount()); + } else { + assertEquals(1, storeFlushDesc.getFlushOutputCount()); //only one file from flush + assertTrue(storeFiles.contains(storeFlushDesc.getFlushOutput(0))); + } + + flushDescriptors.add(entry); + } + } + + assertEquals(3 * 2, flushDescriptors.size()); // START_FLUSH and COMMIT_FLUSH per flush + + // now write those markers to the recovered edits again. 
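+      // (replaying these same START_FLUSH / COMMIT_FLUSH entries via a recovered-edits file below
+      // checks that the region can be reopened and its data read back when flush markers are present)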
+ + Path recoveredEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir); + + Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", 1000)); + fs.create(recoveredEdits); + HLog.Writer writer = HLogFactory.createRecoveredEditsWriter(fs, recoveredEdits, CONF); + + for (HLog.Entry entry : flushDescriptors) { + writer.append(entry); + } + writer.close(); + + // close the region now, and reopen again + region.close(); + region = HRegion.openHRegion(region, null); + + // now check whether we have can read back the data from region + for (long i = minSeqId; i < maxSeqId; i++) { + Get get = new Get(Bytes.toBytes(i)); + Result result = region.get(get); + byte[] value = result.getValue(family, Bytes.toBytes(i)); + assertArrayEquals(Bytes.toBytes(i), value); + } + } finally { + HRegion.closeHRegion(this.region); + this.region = null; + } + } + + class IsFlushWALMarker extends ArgumentMatcher { + volatile FlushAction[] actions; + public IsFlushWALMarker(FlushAction... actions) { + this.actions = actions; + } + @Override + public boolean matches(Object edit) { + List kvs = ((WALEdit)edit).getKeyValues(); + if (kvs.isEmpty()) { + return false; + } + if (WALEdit.isMetaEditFamily(kvs.get(0))) { + FlushDescriptor desc = null; + try { + desc = WALEdit.getFlushDescriptor(kvs.get(0)); + } catch (IOException e) { + LOG.warn(e); + return false; + } + if (desc != null) { + for (FlushAction action : actions) { + if (desc.getAction() == action) { + return true; + } + } + } + } + return false; + } + public IsFlushWALMarker set(FlushAction... actions) { + this.actions = actions; + return this; + } + } + + @Test + @SuppressWarnings("unchecked") + public void testFlushMarkersWALFail() throws Exception { + // test the cases where the WAL append for flush markers fail. + String method = name.getMethodName(); + TableName tableName = TableName.valueOf(method); + byte[] family = Bytes.toBytes("family"); + + // spy an actual WAL implementation to throw exception (was not able to mock) + Path logDir = TEST_UTIL.getDataTestDirOnTestFS("testRecoveredEditsIgnoreFlushMarkers.log"); + HLog hlog = spy(HLogFactory.createHLog(FILESYSTEM, logDir, UUID.randomUUID().toString(), + TEST_UTIL.getConfiguration())); + + this.region = initHRegion(tableName.getName(), HConstants.EMPTY_START_ROW, + HConstants.EMPTY_END_ROW, method, CONF, false, Durability.USE_DEFAULT, hlog, family); + try { + int i = 0; + Put put = new Put(Bytes.toBytes(i)); + put.setDurability(Durability.SKIP_WAL); // have to skip mocked wal + put.add(family, Bytes.toBytes(i), Bytes.toBytes(i)); + region.put(put); + + // 1. Test case where START_FLUSH throws exception + IsFlushWALMarker isFlushWALMarker = new IsFlushWALMarker(FlushAction.START_FLUSH); + + // throw exceptions if the WalEdit is a start flush action + when(hlog.appendNoSync((HTableDescriptor)any(), (HRegionInfo)any(), (HLogKey)any(), + (WALEdit)argThat(isFlushWALMarker), (AtomicLong)any(), Mockito.anyBoolean(), + (List)any())) + .thenThrow(new IOException("Fail to append flush marker")); + + // start cache flush will throw exception + try { + region.flushcache(); + fail("This should have thrown exception"); + } catch (DroppedSnapshotException unexpected) { + // this should not be a dropped snapshot exception. Meaning that RS will not abort + throw unexpected; + } catch (IOException expected) { + // expected + } + + // 2. 
Test case where START_FLUSH succeeds but COMMIT_FLUSH will throw exception + isFlushWALMarker.set(FlushAction.COMMIT_FLUSH); + + try { + region.flushcache(); + fail("This should have thrown exception"); + } catch (DroppedSnapshotException expected) { + // we expect this exception, since we were able to write the snapshot, but failed to + // write the flush marker to WAL + } catch (IOException unexpected) { + throw unexpected; + } + + region.close(); + this.region = initHRegion(tableName.getName(), HConstants.EMPTY_START_ROW, + HConstants.EMPTY_END_ROW, method, CONF, false, Durability.USE_DEFAULT, hlog, family); + region.put(put); + + // 3. Test case where ABORT_FLUSH will throw exception. + // Even if ABORT_FLUSH throws exception, we should not fail with IOE, but continue with + // DroppedSnapshotException. Below COMMIT_FLUSH will cause flush to abort + isFlushWALMarker.set(FlushAction.COMMIT_FLUSH, FlushAction.ABORT_FLUSH); + + try { + region.flushcache(); + fail("This should have thrown exception"); + } catch (DroppedSnapshotException expected) { + // we expect this exception, since we were able to write the snapshot, but failed to + // write the flush marker to WAL + } catch (IOException unexpected) { + throw unexpected; + } + + } finally { + HRegion.closeHRegion(this.region); + this.region = null; + } + } + + @Test public void testGetWhileRegionClose() throws IOException { TableName tableName = TableName.valueOf(name.getMethodName());