From d44e7df5dc1e701fae4b611b9858dfd80499ee35 Mon Sep 17 00:00:00 2001 From: Enis Soztutar Date: Tue, 19 Aug 2014 18:45:21 -0700 Subject: [PATCH] HBASE-11512 Write region open/close events to WAL --- .../hadoop/hbase/protobuf/ProtobufUtil.java | 26 + .../hbase/protobuf/generated/WALProtos.java | 2455 ++++++++++++++++- hbase-protocol/src/main/protobuf/WAL.proto | 23 + .../hadoop/hbase/regionserver/HRegion.java | 52 + .../hbase/regionserver/wal/HLogUtil.java | 17 + .../hbase/regionserver/wal/WALEdit.java | 16 + .../master/TestDistributedLogSplitting.java | 38 +- .../hbase/regionserver/TestHRegion.java | 136 + .../hbase/regionserver/wal/TestWALReplay.java | 21 +- 9 files changed, 2755 insertions(+), 29 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 4033fb5abd3..86fe51568ab 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -120,6 +120,8 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Regio import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.access.TablePermission; import org.apache.hadoop.hbase.security.access.UserPermission; @@ -2529,6 +2531,30 @@ public final class ProtobufUtil { return desc.build(); } + public static RegionEventDescriptor toRegionEventDescriptor( + EventType eventType, HRegionInfo hri, long seqId, ServerName server, + Map> storeFiles) { + RegionEventDescriptor.Builder desc = RegionEventDescriptor.newBuilder() + .setEventType(eventType) + .setTableName(ByteStringer.wrap(hri.getTable().getName())) + .setEncodedRegionName(ByteStringer.wrap(hri.getEncodedNameAsBytes())) + .setLogSequenceNumber(seqId) + .setServer(toServerName(server)); + + for (Map.Entry> entry : storeFiles.entrySet()) { + RegionEventDescriptor.StoreDescriptor.Builder builder + = RegionEventDescriptor.StoreDescriptor.newBuilder() + .setFamilyName(ByteStringer.wrap(entry.getKey())) + .setStoreHomeDir(Bytes.toString(entry.getKey())); + for (Path path : entry.getValue()) { + builder.addStoreFile(path.getName()); + } + + desc.addStores(builder); + } + return desc.build(); + } + /** * Return short version of Message toString'd, shorter than TextFormat#shortDebugString. 
* Tries to NOT print out data both because it can be big but also so we do not have data in our diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java index c569b504c7b..af61d47d6b5 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java @@ -7600,6 +7600,2419 @@ public final class WALProtos { // @@protoc_insertion_point(class_scope:FlushDescriptor) } + public interface RegionEventDescriptorOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .RegionEventDescriptor.EventType event_type = 1; + /** + * required .RegionEventDescriptor.EventType event_type = 1; + */ + boolean hasEventType(); + /** + * required .RegionEventDescriptor.EventType event_type = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType getEventType(); + + // required bytes table_name = 2; + /** + * required bytes table_name = 2; + */ + boolean hasTableName(); + /** + * required bytes table_name = 2; + */ + com.google.protobuf.ByteString getTableName(); + + // required bytes encoded_region_name = 3; + /** + * required bytes encoded_region_name = 3; + */ + boolean hasEncodedRegionName(); + /** + * required bytes encoded_region_name = 3; + */ + com.google.protobuf.ByteString getEncodedRegionName(); + + // optional uint64 log_sequence_number = 4; + /** + * optional uint64 log_sequence_number = 4; + */ + boolean hasLogSequenceNumber(); + /** + * optional uint64 log_sequence_number = 4; + */ + long getLogSequenceNumber(); + + // repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + java.util.List + getStoresList(); + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor getStores(int index); + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + int getStoresCount(); + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + java.util.List + getStoresOrBuilderList(); + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptorOrBuilder getStoresOrBuilder( + int index); + + // optional .ServerName server = 6; + /** + * optional .ServerName server = 6; + * + *
+     * <pre>
+     * Server who opened the region
+     * </pre>
+ */ + boolean hasServer(); + /** + * optional .ServerName server = 6; + * + *
+     * <pre>
+     * Server who opened the region
+     * </pre>
+ */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer(); + /** + * optional .ServerName server = 6; + * + *
+     * <pre>
+     * Server who opened the region
+     * </pre>
+ */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder(); + } + /** + * Protobuf type {@code RegionEventDescriptor} + * + *
+   * <pre>
+   **
+   * Special WAL entry to hold all related to a region event (open/close).
+   * </pre>
+ */ + public static final class RegionEventDescriptor extends + com.google.protobuf.GeneratedMessage + implements RegionEventDescriptorOrBuilder { + // Use RegionEventDescriptor.newBuilder() to construct. + private RegionEventDescriptor(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RegionEventDescriptor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RegionEventDescriptor defaultInstance; + public static RegionEventDescriptor getDefaultInstance() { + return defaultInstance; + } + + public RegionEventDescriptor getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RegionEventDescriptor( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType value = org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + eventType_ = value; + } + break; + } + case 18: { + bitField0_ |= 0x00000002; + tableName_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + encodedRegionName_ = input.readBytes(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + logSequenceNumber_ = input.readUInt64(); + break; + } + case 42: { + if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + stores_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000010; + } + stores_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor.PARSER, extensionRegistry)); + break; + } + case 50: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; + if (((bitField0_ & 0x00000010) == 0x00000010)) { + subBuilder = server_.toBuilder(); + } + server_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(server_); + server_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000010; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + stores_ = java.util.Collections.unmodifiableList(stores_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final 
com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.internal_static_RegionEventDescriptor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.internal_static_RegionEventDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RegionEventDescriptor parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RegionEventDescriptor(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + /** + * Protobuf enum {@code RegionEventDescriptor.EventType} + */ + public enum EventType + implements com.google.protobuf.ProtocolMessageEnum { + /** + * REGION_OPEN = 0; + */ + REGION_OPEN(0, 0), + /** + * REGION_CLOSE = 1; + */ + REGION_CLOSE(1, 1), + ; + + /** + * REGION_OPEN = 0; + */ + public static final int REGION_OPEN_VALUE = 0; + /** + * REGION_CLOSE = 1; + */ + public static final int REGION_CLOSE_VALUE = 1; + + + public final int getNumber() { return value; } + + public static EventType valueOf(int value) { + switch (value) { + case 0: return REGION_OPEN; + case 1: return REGION_CLOSE; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public EventType findValueByNumber(int number) { + return EventType.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.getDescriptor().getEnumTypes().get(0); + } + + private static final EventType[] VALUES = values(); + + public static EventType valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private EventType(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:RegionEventDescriptor.EventType) + } + + public interface StoreDescriptorOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bytes family_name = 1; + /** + * required bytes family_name = 1; + */ + boolean hasFamilyName(); + /** + * required bytes family_name = 1; + */ + com.google.protobuf.ByteString getFamilyName(); + + // required string store_home_dir = 2; + /** + * required string store_home_dir = 2; + * + *
+       * <pre>
+       *relative to region dir
+       * </pre>
+ */ + boolean hasStoreHomeDir(); + /** + * required string store_home_dir = 2; + * + *
+       * <pre>
+       *relative to region dir
+       * </pre>
+ */ + java.lang.String getStoreHomeDir(); + /** + * required string store_home_dir = 2; + * + *
+       * <pre>
+       *relative to region dir
+       * </pre>
+ */ + com.google.protobuf.ByteString + getStoreHomeDirBytes(); + + // repeated string store_file = 3; + /** + * repeated string store_file = 3; + * + *
+       * <pre>
+       * relative to store dir
+       * </pre>
+ */ + java.util.List + getStoreFileList(); + /** + * repeated string store_file = 3; + * + *
+       * <pre>
+       * relative to store dir
+       * </pre>
+ */ + int getStoreFileCount(); + /** + * repeated string store_file = 3; + * + *
+       * <pre>
+       * relative to store dir
+       * </pre>
+ */ + java.lang.String getStoreFile(int index); + /** + * repeated string store_file = 3; + * + *
+       * <pre>
+       * relative to store dir
+       * </pre>
+ */ + com.google.protobuf.ByteString + getStoreFileBytes(int index); + } + /** + * Protobuf type {@code RegionEventDescriptor.StoreDescriptor} + */ + public static final class StoreDescriptor extends + com.google.protobuf.GeneratedMessage + implements StoreDescriptorOrBuilder { + // Use StoreDescriptor.newBuilder() to construct. + private StoreDescriptor(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private StoreDescriptor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final StoreDescriptor defaultInstance; + public static StoreDescriptor getDefaultInstance() { + return defaultInstance; + } + + public StoreDescriptor getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private StoreDescriptor( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + familyName_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + storeHomeDir_ = input.readBytes(); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + storeFile_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000004; + } + storeFile_.add(input.readBytes()); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + storeFile_ = new com.google.protobuf.UnmodifiableLazyStringList(storeFile_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.internal_static_RegionEventDescriptor_StoreDescriptor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.internal_static_RegionEventDescriptor_StoreDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public StoreDescriptor parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return new StoreDescriptor(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bytes family_name = 1; + public static final int FAMILY_NAME_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString familyName_; + /** + * required bytes family_name = 1; + */ + public boolean hasFamilyName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes family_name = 1; + */ + public com.google.protobuf.ByteString getFamilyName() { + return familyName_; + } + + // required string store_home_dir = 2; + public static final int STORE_HOME_DIR_FIELD_NUMBER = 2; + private java.lang.Object storeHomeDir_; + /** + * required string store_home_dir = 2; + * + *
+       * <pre>
+       *relative to region dir
+       * </pre>
+ */ + public boolean hasStoreHomeDir() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string store_home_dir = 2; + * + *
+       * <pre>
+       *relative to region dir
+       * </pre>
+ */ + public java.lang.String getStoreHomeDir() { + java.lang.Object ref = storeHomeDir_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + storeHomeDir_ = s; + } + return s; + } + } + /** + * required string store_home_dir = 2; + * + *
+       * <pre>
+       *relative to region dir
+       * </pre>
+ */ + public com.google.protobuf.ByteString + getStoreHomeDirBytes() { + java.lang.Object ref = storeHomeDir_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + storeHomeDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated string store_file = 3; + public static final int STORE_FILE_FIELD_NUMBER = 3; + private com.google.protobuf.LazyStringList storeFile_; + /** + * repeated string store_file = 3; + * + *
+       * <pre>
+       * relative to store dir
+       * </pre>
+ */ + public java.util.List + getStoreFileList() { + return storeFile_; + } + /** + * repeated string store_file = 3; + * + *
+       * <pre>
+       * relative to store dir
+       * </pre>
+ */ + public int getStoreFileCount() { + return storeFile_.size(); + } + /** + * repeated string store_file = 3; + * + *
+       * <pre>
+       * relative to store dir
+       * </pre>
+ */ + public java.lang.String getStoreFile(int index) { + return storeFile_.get(index); + } + /** + * repeated string store_file = 3; + * + *
+       * <pre>
+       * relative to store dir
+       * </pre>
+ */ + public com.google.protobuf.ByteString + getStoreFileBytes(int index) { + return storeFile_.getByteString(index); + } + + private void initFields() { + familyName_ = com.google.protobuf.ByteString.EMPTY; + storeHomeDir_ = ""; + storeFile_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasFamilyName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasStoreHomeDir()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, familyName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getStoreHomeDirBytes()); + } + for (int i = 0; i < storeFile_.size(); i++) { + output.writeBytes(3, storeFile_.getByteString(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, familyName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getStoreHomeDirBytes()); + } + { + int dataSize = 0; + for (int i = 0; i < storeFile_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(storeFile_.getByteString(i)); + } + size += dataSize; + size += 1 * getStoreFileList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor other = (org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor) obj; + + boolean result = true; + result = result && (hasFamilyName() == other.hasFamilyName()); + if (hasFamilyName()) { + result = result && getFamilyName() + .equals(other.getFamilyName()); + } + result = result && (hasStoreHomeDir() == other.hasStoreHomeDir()); + if (hasStoreHomeDir()) { + result = result && getStoreHomeDir() + .equals(other.getStoreHomeDir()); + } + result = result && getStoreFileList() + .equals(other.getStoreFileList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasFamilyName()) { + hash = (37 * hash) + FAMILY_NAME_FIELD_NUMBER; + hash = (53 * hash) + getFamilyName().hashCode(); + } + if (hasStoreHomeDir()) { + hash = (37 * 
hash) + STORE_HOME_DIR_FIELD_NUMBER; + hash = (53 * hash) + getStoreHomeDir().hashCode(); + } + if (getStoreFileCount() > 0) { + hash = (37 * hash) + STORE_FILE_FIELD_NUMBER; + hash = (53 * hash) + getStoreFileList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code RegionEventDescriptor.StoreDescriptor} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.internal_static_RegionEventDescriptor_StoreDescriptor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.internal_static_RegionEventDescriptor_StoreDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + familyName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + storeHomeDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + storeFile_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.internal_static_RegionEventDescriptor_StoreDescriptor_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor build() { + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor result = new org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.familyName_ = familyName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.storeHomeDir_ = storeHomeDir_; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + 
storeFile_ = new com.google.protobuf.UnmodifiableLazyStringList( + storeFile_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.storeFile_ = storeFile_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor.getDefaultInstance()) return this; + if (other.hasFamilyName()) { + setFamilyName(other.getFamilyName()); + } + if (other.hasStoreHomeDir()) { + bitField0_ |= 0x00000002; + storeHomeDir_ = other.storeHomeDir_; + onChanged(); + } + if (!other.storeFile_.isEmpty()) { + if (storeFile_.isEmpty()) { + storeFile_ = other.storeFile_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureStoreFileIsMutable(); + storeFile_.addAll(other.storeFile_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasFamilyName()) { + + return false; + } + if (!hasStoreHomeDir()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bytes family_name = 1; + private com.google.protobuf.ByteString familyName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes family_name = 1; + */ + public boolean hasFamilyName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes family_name = 1; + */ + public com.google.protobuf.ByteString getFamilyName() { + return familyName_; + } + /** + * required bytes family_name = 1; + */ + public Builder setFamilyName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + familyName_ = value; + onChanged(); + return this; + } + /** + * required bytes family_name = 1; + */ + public Builder clearFamilyName() { + bitField0_ = (bitField0_ & ~0x00000001); + familyName_ = getDefaultInstance().getFamilyName(); + onChanged(); + return this; + } + + // required string store_home_dir = 2; + private java.lang.Object storeHomeDir_ = ""; + /** + * required string store_home_dir = 2; + * + *
+         * <pre>
+         *relative to region dir
+         * </pre>
+ */ + public boolean hasStoreHomeDir() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string store_home_dir = 2; + * + *
+         * <pre>
+         *relative to region dir
+         * </pre>
+ */ + public java.lang.String getStoreHomeDir() { + java.lang.Object ref = storeHomeDir_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + storeHomeDir_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string store_home_dir = 2; + * + *
+         * <pre>
+         *relative to region dir
+         * </pre>
+ */ + public com.google.protobuf.ByteString + getStoreHomeDirBytes() { + java.lang.Object ref = storeHomeDir_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + storeHomeDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string store_home_dir = 2; + * + *
+         * <pre>
+         *relative to region dir
+         * </pre>
+ */ + public Builder setStoreHomeDir( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + storeHomeDir_ = value; + onChanged(); + return this; + } + /** + * required string store_home_dir = 2; + * + *
+         * <pre>
+         *relative to region dir
+         * </pre>
+ */ + public Builder clearStoreHomeDir() { + bitField0_ = (bitField0_ & ~0x00000002); + storeHomeDir_ = getDefaultInstance().getStoreHomeDir(); + onChanged(); + return this; + } + /** + * required string store_home_dir = 2; + * + *
+         * <pre>
+         *relative to region dir
+         * </pre>
+ */ + public Builder setStoreHomeDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + storeHomeDir_ = value; + onChanged(); + return this; + } + + // repeated string store_file = 3; + private com.google.protobuf.LazyStringList storeFile_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureStoreFileIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + storeFile_ = new com.google.protobuf.LazyStringArrayList(storeFile_); + bitField0_ |= 0x00000004; + } + } + /** + * repeated string store_file = 3; + * + *
+         * <pre>
+         * relative to store dir
+         * </pre>
+ */ + public java.util.List + getStoreFileList() { + return java.util.Collections.unmodifiableList(storeFile_); + } + /** + * repeated string store_file = 3; + * + *
+         * <pre>
+         * relative to store dir
+         * </pre>
+ */ + public int getStoreFileCount() { + return storeFile_.size(); + } + /** + * repeated string store_file = 3; + * + *
+         * <pre>
+         * relative to store dir
+         * </pre>
+ */ + public java.lang.String getStoreFile(int index) { + return storeFile_.get(index); + } + /** + * repeated string store_file = 3; + * + *
+         * <pre>
+         * relative to store dir
+         * </pre>
+ */ + public com.google.protobuf.ByteString + getStoreFileBytes(int index) { + return storeFile_.getByteString(index); + } + /** + * repeated string store_file = 3; + * + *
+         * <pre>
+         * relative to store dir
+         * </pre>
+ */ + public Builder setStoreFile( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreFileIsMutable(); + storeFile_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string store_file = 3; + * + *
+         * <pre>
+         * relative to store dir
+         * </pre>
+ */ + public Builder addStoreFile( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreFileIsMutable(); + storeFile_.add(value); + onChanged(); + return this; + } + /** + * repeated string store_file = 3; + * + *
+         * <pre>
+         * relative to store dir
+         * </pre>
+ */ + public Builder addAllStoreFile( + java.lang.Iterable values) { + ensureStoreFileIsMutable(); + super.addAll(values, storeFile_); + onChanged(); + return this; + } + /** + * repeated string store_file = 3; + * + *
+         * <pre>
+         * relative to store dir
+         * </pre>
+ */ + public Builder clearStoreFile() { + storeFile_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + /** + * repeated string store_file = 3; + * + *
+         * <pre>
+         * relative to store dir
+         * </pre>
+ */ + public Builder addStoreFileBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreFileIsMutable(); + storeFile_.add(value); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:RegionEventDescriptor.StoreDescriptor) + } + + static { + defaultInstance = new StoreDescriptor(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RegionEventDescriptor.StoreDescriptor) + } + + private int bitField0_; + // required .RegionEventDescriptor.EventType event_type = 1; + public static final int EVENT_TYPE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType eventType_; + /** + * required .RegionEventDescriptor.EventType event_type = 1; + */ + public boolean hasEventType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .RegionEventDescriptor.EventType event_type = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType getEventType() { + return eventType_; + } + + // required bytes table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString tableName_; + /** + * required bytes table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bytes table_name = 2; + */ + public com.google.protobuf.ByteString getTableName() { + return tableName_; + } + + // required bytes encoded_region_name = 3; + public static final int ENCODED_REGION_NAME_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString encodedRegionName_; + /** + * required bytes encoded_region_name = 3; + */ + public boolean hasEncodedRegionName() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bytes encoded_region_name = 3; + */ + public com.google.protobuf.ByteString getEncodedRegionName() { + return encodedRegionName_; + } + + // optional uint64 log_sequence_number = 4; + public static final int LOG_SEQUENCE_NUMBER_FIELD_NUMBER = 4; + private long logSequenceNumber_; + /** + * optional uint64 log_sequence_number = 4; + */ + public boolean hasLogSequenceNumber() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional uint64 log_sequence_number = 4; + */ + public long getLogSequenceNumber() { + return logSequenceNumber_; + } + + // repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + public static final int STORES_FIELD_NUMBER = 5; + private java.util.List stores_; + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public java.util.List getStoresList() { + return stores_; + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public java.util.List + getStoresOrBuilderList() { + return stores_; + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public int getStoresCount() { + return stores_.size(); + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor getStores(int index) { + return stores_.get(index); + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptorOrBuilder getStoresOrBuilder( + int index) { + return stores_.get(index); + } + + // optional .ServerName server = 6; + 
public static final int SERVER_FIELD_NUMBER = 6; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_; + /** + * optional .ServerName server = 6; + * + *
+     * <pre>
+     * Server who opened the region
+     * </pre>
+ */ + public boolean hasServer() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .ServerName server = 6; + * + *
+     * <pre>
+     * Server who opened the region
+     * </pre>
+ */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() { + return server_; + } + /** + * optional .ServerName server = 6; + * + *
+     * <pre>
+     * Server who opened the region
+     * </pre>
+ */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() { + return server_; + } + + private void initFields() { + eventType_ = org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType.REGION_OPEN; + tableName_ = com.google.protobuf.ByteString.EMPTY; + encodedRegionName_ = com.google.protobuf.ByteString.EMPTY; + logSequenceNumber_ = 0L; + stores_ = java.util.Collections.emptyList(); + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasEventType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasEncodedRegionName()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getStoresCount(); i++) { + if (!getStores(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasServer()) { + if (!getServer().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, eventType_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, encodedRegionName_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt64(4, logSequenceNumber_); + } + for (int i = 0; i < stores_.size(); i++) { + output.writeMessage(5, stores_.get(i)); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeMessage(6, server_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, eventType_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, encodedRegionName_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(4, logSequenceNumber_); + } + for (int i = 0; i < stores_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, stores_.get(i)); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, server_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor other = (org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor) obj; + + boolean result = true; + result = result && (hasEventType() == other.hasEventType()); + if (hasEventType()) { + result = result && + (getEventType() == other.getEventType()); + } + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasEncodedRegionName() == other.hasEncodedRegionName()); + if (hasEncodedRegionName()) { + result = result && getEncodedRegionName() + .equals(other.getEncodedRegionName()); + } + result = result && (hasLogSequenceNumber() == other.hasLogSequenceNumber()); + if (hasLogSequenceNumber()) { + result = result && (getLogSequenceNumber() + == other.getLogSequenceNumber()); + } + result = result && getStoresList() + .equals(other.getStoresList()); + result = result && (hasServer() == other.hasServer()); + if (hasServer()) { + result = result && getServer() + .equals(other.getServer()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasEventType()) { + hash = (37 * hash) + EVENT_TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getEventType()); + } + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasEncodedRegionName()) { + hash = (37 * hash) + ENCODED_REGION_NAME_FIELD_NUMBER; + hash = (53 * hash) + getEncodedRegionName().hashCode(); + } + if (hasLogSequenceNumber()) { + hash = (37 * hash) + LOG_SEQUENCE_NUMBER_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getLogSequenceNumber()); + } + if (getStoresCount() > 0) { + hash = (37 * hash) + STORES_FIELD_NUMBER; + hash = (53 * hash) + getStoresList().hashCode(); + } + if (hasServer()) { + hash = (37 * hash) + SERVER_FIELD_NUMBER; + hash = (53 * hash) + getServer().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code RegionEventDescriptor} + * + *
+     * <pre>
+     **
+     * Special WAL entry to hold all related to a region event (open/close).
+     * </pre>
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.internal_static_RegionEventDescriptor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.internal_static_RegionEventDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getStoresFieldBuilder(); + getServerFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + eventType_ = org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType.REGION_OPEN; + bitField0_ = (bitField0_ & ~0x00000001); + tableName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + encodedRegionName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + logSequenceNumber_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + if (storesBuilder_ == null) { + stores_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + } else { + storesBuilder_.clear(); + } + if (serverBuilder_ == null) { + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + } else { + serverBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.internal_static_RegionEventDescriptor_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor build() { + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor result = new org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.eventType_ = eventType_; + if 
(((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.tableName_ = tableName_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.encodedRegionName_ = encodedRegionName_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.logSequenceNumber_ = logSequenceNumber_; + if (storesBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010)) { + stores_ = java.util.Collections.unmodifiableList(stores_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.stores_ = stores_; + } else { + result.stores_ = storesBuilder_.build(); + } + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000010; + } + if (serverBuilder_ == null) { + result.server_ = server_; + } else { + result.server_ = serverBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.getDefaultInstance()) return this; + if (other.hasEventType()) { + setEventType(other.getEventType()); + } + if (other.hasTableName()) { + setTableName(other.getTableName()); + } + if (other.hasEncodedRegionName()) { + setEncodedRegionName(other.getEncodedRegionName()); + } + if (other.hasLogSequenceNumber()) { + setLogSequenceNumber(other.getLogSequenceNumber()); + } + if (storesBuilder_ == null) { + if (!other.stores_.isEmpty()) { + if (stores_.isEmpty()) { + stores_ = other.stores_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureStoresIsMutable(); + stores_.addAll(other.stores_); + } + onChanged(); + } + } else { + if (!other.stores_.isEmpty()) { + if (storesBuilder_.isEmpty()) { + storesBuilder_.dispose(); + storesBuilder_ = null; + stores_ = other.stores_; + bitField0_ = (bitField0_ & ~0x00000010); + storesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getStoresFieldBuilder() : null; + } else { + storesBuilder_.addAllMessages(other.stores_); + } + } + } + if (other.hasServer()) { + mergeServer(other.getServer()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasEventType()) { + + return false; + } + if (!hasTableName()) { + + return false; + } + if (!hasEncodedRegionName()) { + + return false; + } + for (int i = 0; i < getStoresCount(); i++) { + if (!getStores(i).isInitialized()) { + + return false; + } + } + if (hasServer()) { + if (!getServer().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .RegionEventDescriptor.EventType event_type = 1; + private org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType eventType_ = org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType.REGION_OPEN; + /** + * required .RegionEventDescriptor.EventType event_type = 1; + */ + public boolean hasEventType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .RegionEventDescriptor.EventType event_type = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType getEventType() { + return eventType_; + } + /** + * required .RegionEventDescriptor.EventType event_type = 1; + */ + public Builder setEventType(org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + eventType_ = value; + onChanged(); + return this; + } + /** + * required .RegionEventDescriptor.EventType event_type = 1; + */ + public Builder clearEventType() { + bitField0_ = (bitField0_ & ~0x00000001); + eventType_ = org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType.REGION_OPEN; + onChanged(); + return this; + } + + // required bytes table_name = 2; + private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bytes table_name = 2; + */ + public com.google.protobuf.ByteString getTableName() { + return tableName_; + } + /** + * required bytes table_name = 2; + */ + public Builder setTableName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + tableName_ = value; + onChanged(); + return this; + } + /** + * required bytes table_name = 2; + */ + public Builder clearTableName() { + bitField0_ = (bitField0_ & ~0x00000002); + tableName_ = getDefaultInstance().getTableName(); + onChanged(); + return this; + } + + // required bytes encoded_region_name = 3; + private 
com.google.protobuf.ByteString encodedRegionName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes encoded_region_name = 3; + */ + public boolean hasEncodedRegionName() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bytes encoded_region_name = 3; + */ + public com.google.protobuf.ByteString getEncodedRegionName() { + return encodedRegionName_; + } + /** + * required bytes encoded_region_name = 3; + */ + public Builder setEncodedRegionName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + encodedRegionName_ = value; + onChanged(); + return this; + } + /** + * required bytes encoded_region_name = 3; + */ + public Builder clearEncodedRegionName() { + bitField0_ = (bitField0_ & ~0x00000004); + encodedRegionName_ = getDefaultInstance().getEncodedRegionName(); + onChanged(); + return this; + } + + // optional uint64 log_sequence_number = 4; + private long logSequenceNumber_ ; + /** + * optional uint64 log_sequence_number = 4; + */ + public boolean hasLogSequenceNumber() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional uint64 log_sequence_number = 4; + */ + public long getLogSequenceNumber() { + return logSequenceNumber_; + } + /** + * optional uint64 log_sequence_number = 4; + */ + public Builder setLogSequenceNumber(long value) { + bitField0_ |= 0x00000008; + logSequenceNumber_ = value; + onChanged(); + return this; + } + /** + * optional uint64 log_sequence_number = 4; + */ + public Builder clearLogSequenceNumber() { + bitField0_ = (bitField0_ & ~0x00000008); + logSequenceNumber_ = 0L; + onChanged(); + return this; + } + + // repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + private java.util.List stores_ = + java.util.Collections.emptyList(); + private void ensureStoresIsMutable() { + if (!((bitField0_ & 0x00000010) == 0x00000010)) { + stores_ = new java.util.ArrayList(stores_); + bitField0_ |= 0x00000010; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor, org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptorOrBuilder> storesBuilder_; + + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public java.util.List getStoresList() { + if (storesBuilder_ == null) { + return java.util.Collections.unmodifiableList(stores_); + } else { + return storesBuilder_.getMessageList(); + } + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public int getStoresCount() { + if (storesBuilder_ == null) { + return stores_.size(); + } else { + return storesBuilder_.getCount(); + } + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor getStores(int index) { + if (storesBuilder_ == null) { + return stores_.get(index); + } else { + return storesBuilder_.getMessage(index); + } + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public Builder setStores( + int index, org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor value) { + if (storesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoresIsMutable(); + stores_.set(index, value); 
+ onChanged(); + } else { + storesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public Builder setStores( + int index, org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor.Builder builderForValue) { + if (storesBuilder_ == null) { + ensureStoresIsMutable(); + stores_.set(index, builderForValue.build()); + onChanged(); + } else { + storesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public Builder addStores(org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor value) { + if (storesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoresIsMutable(); + stores_.add(value); + onChanged(); + } else { + storesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public Builder addStores( + int index, org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor value) { + if (storesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoresIsMutable(); + stores_.add(index, value); + onChanged(); + } else { + storesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public Builder addStores( + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor.Builder builderForValue) { + if (storesBuilder_ == null) { + ensureStoresIsMutable(); + stores_.add(builderForValue.build()); + onChanged(); + } else { + storesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public Builder addStores( + int index, org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor.Builder builderForValue) { + if (storesBuilder_ == null) { + ensureStoresIsMutable(); + stores_.add(index, builderForValue.build()); + onChanged(); + } else { + storesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public Builder addAllStores( + java.lang.Iterable values) { + if (storesBuilder_ == null) { + ensureStoresIsMutable(); + super.addAll(values, stores_); + onChanged(); + } else { + storesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public Builder clearStores() { + if (storesBuilder_ == null) { + stores_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + storesBuilder_.clear(); + } + return this; + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public Builder removeStores(int index) { + if (storesBuilder_ == null) { + ensureStoresIsMutable(); + stores_.remove(index); + onChanged(); + } else { + storesBuilder_.remove(index); + } + return this; + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor.Builder getStoresBuilder( + int index) { + return getStoresFieldBuilder().getBuilder(index); + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 
5; + */ + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptorOrBuilder getStoresOrBuilder( + int index) { + if (storesBuilder_ == null) { + return stores_.get(index); } else { + return storesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public java.util.List + getStoresOrBuilderList() { + if (storesBuilder_ != null) { + return storesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(stores_); + } + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor.Builder addStoresBuilder() { + return getStoresFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor.getDefaultInstance()); + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor.Builder addStoresBuilder( + int index) { + return getStoresFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor.getDefaultInstance()); + } + /** + * repeated .RegionEventDescriptor.StoreDescriptor stores = 5; + */ + public java.util.List + getStoresBuilderList() { + return getStoresFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor, org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptorOrBuilder> + getStoresFieldBuilder() { + if (storesBuilder_ == null) { + storesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor, org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptorOrBuilder>( + stores_, + ((bitField0_ & 0x00000010) == 0x00000010), + getParentForChildren(), + isClean()); + stores_ = null; + } + return storesBuilder_; + } + + // optional .ServerName server = 6; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverBuilder_; + /** + * optional .ServerName server = 6; + * + *
+       * Server who opened the region
+       * 
+ */ + public boolean hasServer() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .ServerName server = 6; + * + *
+       * Server who opened the region
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() { + if (serverBuilder_ == null) { + return server_; + } else { + return serverBuilder_.getMessage(); + } + } + /** + * optional .ServerName server = 6; + * + *
+       * Server who opened the region
+       * 
+ */ + public Builder setServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + server_ = value; + onChanged(); + } else { + serverBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .ServerName server = 6; + * + *
+       * Server who opened the region
+       * 
+ */ + public Builder setServer( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverBuilder_ == null) { + server_ = builderForValue.build(); + onChanged(); + } else { + serverBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .ServerName server = 6; + * + *
+       * Server who opened the region
+       * 
+ */ + public Builder mergeServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverBuilder_ == null) { + if (((bitField0_ & 0x00000020) == 0x00000020) && + server_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + server_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(server_).mergeFrom(value).buildPartial(); + } else { + server_ = value; + } + onChanged(); + } else { + serverBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .ServerName server = 6; + * + *
+       * Server who opened the region
+       * 
+ */ + public Builder clearServer() { + if (serverBuilder_ == null) { + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + onChanged(); + } else { + serverBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + /** + * optional .ServerName server = 6; + * + *
+       * Server who opened the region
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return getServerFieldBuilder().getBuilder(); + } + /** + * optional .ServerName server = 6; + * + *
+       * Server who opened the region
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() { + if (serverBuilder_ != null) { + return serverBuilder_.getMessageOrBuilder(); + } else { + return server_; + } + } + /** + * optional .ServerName server = 6; + * + *
+       * Server who opened the region
+       * 
+ */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getServerFieldBuilder() { + if (serverBuilder_ == null) { + serverBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + server_, + getParentForChildren(), + isClean()); + server_ = null; + } + return serverBuilder_; + } + + // @@protoc_insertion_point(builder_scope:RegionEventDescriptor) + } + + static { + defaultInstance = new RegionEventDescriptor(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RegionEventDescriptor) + } + public interface WALTrailerOrBuilder extends com.google.protobuf.MessageOrBuilder { } @@ -7980,6 +10393,16 @@ public final class WALProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_FlushDescriptor_StoreFlushDescriptor_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RegionEventDescriptor_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RegionEventDescriptor_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RegionEventDescriptor_StoreDescriptor_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RegionEventDescriptor_StoreDescriptor_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_WALTrailer_descriptor; private static @@ -8021,10 +10444,20 @@ public final class WALProtos { "name\030\001 \002(\014\022\026\n\016store_home_dir\030\002 \002(\t\022\024\n\014fl" + "ush_output\030\003 \003(\t\"A\n\013FlushAction\022\017\n\013START" + "_FLUSH\020\000\022\020\n\014COMMIT_FLUSH\020\001\022\017\n\013ABORT_FLUS" + - "H\020\002\"\014\n\nWALTrailer*F\n\tScopeType\022\033\n\027REPLIC" + - "ATION_SCOPE_LOCAL\020\000\022\034\n\030REPLICATION_SCOPE" + - "_GLOBAL\020\001B?\n*org.apache.hadoop.hbase.pro", - "tobuf.generatedB\tWALProtosH\001\210\001\000\240\001\001" + "H\020\002\"\364\002\n\025RegionEventDescriptor\0224\n\nevent_t" + + "ype\030\001 \002(\0162 .RegionEventDescriptor.EventT" + + "ype\022\022\n\ntable_name\030\002 \002(\014\022\033\n\023encoded_regio", + "n_name\030\003 \002(\014\022\033\n\023log_sequence_number\030\004 \001(" + + "\004\0226\n\006stores\030\005 \003(\0132&.RegionEventDescripto" + + "r.StoreDescriptor\022\033\n\006server\030\006 \001(\0132\013.Serv" + + "erName\032R\n\017StoreDescriptor\022\023\n\013family_name" + + "\030\001 \002(\014\022\026\n\016store_home_dir\030\002 \002(\t\022\022\n\nstore_" + + "file\030\003 \003(\t\".\n\tEventType\022\017\n\013REGION_OPEN\020\000" + + "\022\020\n\014REGION_CLOSE\020\001\"\014\n\nWALTrailer*F\n\tScop" + + "eType\022\033\n\027REPLICATION_SCOPE_LOCAL\020\000\022\034\n\030RE" + + "PLICATION_SCOPE_GLOBAL\020\001B?\n*org.apache.h" + + "adoop.hbase.protobuf.generatedB\tWALProto", + "sH\001\210\001\000\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -8067,8 +10500,20 @@ 
public final class WALProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_FlushDescriptor_StoreFlushDescriptor_descriptor, new java.lang.String[] { "FamilyName", "StoreHomeDir", "FlushOutput", }); - internal_static_WALTrailer_descriptor = + internal_static_RegionEventDescriptor_descriptor = getDescriptor().getMessageTypes().get(5); + internal_static_RegionEventDescriptor_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RegionEventDescriptor_descriptor, + new java.lang.String[] { "EventType", "TableName", "EncodedRegionName", "LogSequenceNumber", "Stores", "Server", }); + internal_static_RegionEventDescriptor_StoreDescriptor_descriptor = + internal_static_RegionEventDescriptor_descriptor.getNestedTypes().get(0); + internal_static_RegionEventDescriptor_StoreDescriptor_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RegionEventDescriptor_StoreDescriptor_descriptor, + new java.lang.String[] { "FamilyName", "StoreHomeDir", "StoreFile", }); + internal_static_WALTrailer_descriptor = + getDescriptor().getMessageTypes().get(6); internal_static_WALTrailer_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_WALTrailer_descriptor, diff --git a/hbase-protocol/src/main/protobuf/WAL.proto b/hbase-protocol/src/main/protobuf/WAL.proto index 1c88b264406..dae92d2eabd 100644 --- a/hbase-protocol/src/main/protobuf/WAL.proto +++ b/hbase-protocol/src/main/protobuf/WAL.proto @@ -120,6 +120,29 @@ message FlushDescriptor { repeated StoreFlushDescriptor store_flushes = 5; } +/** + * Special WAL entry to hold all related to a region event (open/close). + */ +message RegionEventDescriptor { + enum EventType { + REGION_OPEN = 0; + REGION_CLOSE = 1; + } + + message StoreDescriptor { + required bytes family_name = 1; + required string store_home_dir = 2; //relative to region dir + repeated string store_file = 3; // relative to store dir + } + + required EventType event_type = 1; + required bytes table_name = 2; + required bytes encoded_region_name = 3; + optional uint64 log_sequence_number = 4; + repeated StoreDescriptor stores = 5; + optional ServerName server = 6; // Server who opened the region +} + /** * A trailer that is appended to the end of a properly closed HLog WAL file. * If missing, this is either a legacy or a corrupted WAL file. 
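For reference, below is a minimal sketch (not part of the patch itself) of how the new RegionEventDescriptor message is meant to be used end to end: gather the store files per column family, build the descriptor through ProtobufUtil.toRegionEventDescriptor, and append it to the WAL through HLogUtil.writeRegionEventMarker. The variable names (region, log, openSeqId, serverName) and the surrounding imports are assumed for illustration only; the authoritative call sites are HRegion#writeRegionOpenMarker and HRegion#writeRegionCloseMarker in the HRegion.java hunk that follows.

    // Collect the current store file paths, keyed by column family.
    Map<byte[], List<Path>> storeFiles = new TreeMap<byte[], List<Path>>(Bytes.BYTES_COMPARATOR);
    for (Map.Entry<byte[], Store> entry : region.getStores().entrySet()) {
      List<Path> files = new ArrayList<Path>();
      for (StoreFile storeFile : entry.getValue().getStorefiles()) {
        files.add(storeFile.getPath());
      }
      storeFiles.put(entry.getKey(), files);
    }

    // Build the REGION_OPEN descriptor defined in WAL.proto above ...
    RegionEventDescriptor openDesc = ProtobufUtil.toRegionEventDescriptor(
        RegionEventDescriptor.EventType.REGION_OPEN, region.getRegionInfo(), openSeqId,
        serverName, storeFiles);

    // ... and append it to the WAL as a METAFAMILY edit, then sync.
    HLogUtil.writeRegionEventMarker(log, region.getTableDesc(), region.getRegionInfo(),
        openDesc, region.getSequenceId());

When reading the WAL back, such an entry can be recognized and decoded with WALEdit.getRegionEventDescriptor(cell), which returns null for cells that are not REGION_EVENT meta edits.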
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 6f74b4d9fbe..274c1b3c5d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -120,6 +120,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServic import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction; import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl.WriteEntry; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; @@ -725,6 +726,8 @@ public class HRegion implements HeapSize { // , Writable{ status.setStatus("Writing region info on filesystem"); fs.checkRegionInfoOnFilesystem(); + + // Initialize all the HStores status.setStatus("Initializing all the Stores"); long maxSeqId = initializeRegionStores(reporter, status); @@ -761,6 +764,7 @@ public class HRegion implements HeapSize { // , Writable{ // overlaps used sequence numbers nextSeqid += this.flushPerChanges + 10000000; // add another extra 10million } + LOG.info("Onlined " + this.getRegionInfo().getShortNameToLog() + "; next sequenceid=" + nextSeqid); @@ -850,6 +854,44 @@ public class HRegion implements HeapSize { // , Writable{ return maxSeqId; } + private void writeRegionOpenMarker(HLog log, long openSeqId) throws IOException { + Map> storeFiles + = new TreeMap>(Bytes.BYTES_COMPARATOR); + for (Map.Entry entry : getStores().entrySet()) { + Store store = entry.getValue(); + ArrayList storeFileNames = new ArrayList(); + for (StoreFile storeFile: store.getStorefiles()) { + storeFileNames.add(storeFile.getPath()); + } + storeFiles.put(entry.getKey(), storeFileNames); + } + + RegionEventDescriptor regionOpenDesc = ProtobufUtil.toRegionEventDescriptor( + RegionEventDescriptor.EventType.REGION_OPEN, getRegionInfo(), openSeqId, + getRegionServerServices().getServerName(), storeFiles); + HLogUtil.writeRegionEventMarker(log, getTableDesc(), getRegionInfo(), regionOpenDesc, + getSequenceId()); + } + + private void writeRegionCloseMarker(HLog log) throws IOException { + Map> storeFiles + = new TreeMap>(Bytes.BYTES_COMPARATOR); + for (Map.Entry entry : getStores().entrySet()) { + Store store = entry.getValue(); + ArrayList storeFileNames = new ArrayList(); + for (StoreFile storeFile: store.getStorefiles()) { + storeFileNames.add(storeFile.getPath()); + } + storeFiles.put(entry.getKey(), storeFileNames); + } + + RegionEventDescriptor regionEventDesc = ProtobufUtil.toRegionEventDescriptor( + RegionEventDescriptor.EventType.REGION_CLOSE, getRegionInfo(), getSequenceId().get(), + getRegionServerServices().getServerName(), storeFiles); + HLogUtil.writeRegionEventMarker(log, getTableDesc(), getRegionInfo(), regionEventDesc, + getSequenceId()); + } + /** * @return True if this region has references. 
*/ @@ -1227,6 +1269,12 @@ public class HRegion implements HeapSize { // , Writable{ storeCloserThreadPool.shutdownNow(); } } + + status.setStatus("Writing region close event to WAL"); + if (!abort && log != null && getRegionServerServices() != null) { + writeRegionCloseMarker(log); + } + this.closed.set(true); if (memstoreSize.get() != 0) LOG.error("Memstore size is " + memstoreSize.get()); if (coprocessorHost != null) { @@ -3550,6 +3598,7 @@ public class HRegion implements HeapSize { // , Writable{ } return storeFileNames; } + ////////////////////////////////////////////////////////////////////////////// // Support code ////////////////////////////////////////////////////////////////////////////// @@ -4601,6 +4650,9 @@ public class HRegion implements HeapSize { // , Writable{ this.openSeqNum = initialize(reporter); this.setSequenceId(openSeqNum); + if (log != null && getRegionServerServices() != null) { + writeRegionOpenMarker(log, openSeqNum); + } return this; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtil.java index 2c4652b2d79..ce353b6e093 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtil.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor; import org.apache.hadoop.hbase.util.FSUtils; import com.google.protobuf.TextFormat; @@ -284,4 +285,20 @@ public class HLogUtil { } return trx; } + + /** + * Write a region event marker (region open or region close) to the WAL and sync it + */ + public static long writeRegionEventMarker(HLog log, HTableDescriptor htd, HRegionInfo info, + final RegionEventDescriptor r, AtomicLong sequenceId) throws IOException { + TableName tn = TableName.valueOf(r.getTableName().toByteArray()); + HLogKey key = new HLogKey(info.getEncodedNameAsBytes(), tn); + long trx = log.appendNoSync(htd, info, key, WALEdit.createRegionEventWALEdit(info, r), + sequenceId, false, null); + log.sync(trx); + if (LOG.isTraceEnabled()) { + LOG.trace("Appended region event marker " + TextFormat.shortDebugString(r)); + } + return trx; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java index f684d7dfe30..4b38027af80 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -86,6 +87,7 @@ public class WALEdit implements Writable, HeapSize { static final byte [] METAROW = Bytes.toBytes("METAROW"); static final byte[]
COMPACTION = Bytes.toBytes("HBASE::COMPACTION"); static final byte [] FLUSH = Bytes.toBytes("HBASE::FLUSH"); + static final byte [] REGION_EVENT = Bytes.toBytes("HBASE::REGION_EVENT"); private final int VERSION_2 = -1; private final boolean isReplay; @@ -277,6 +279,20 @@ public class WALEdit implements Writable, HeapSize { return null; } + public static WALEdit createRegionEventWALEdit(HRegionInfo hri, + RegionEventDescriptor regionEventDesc) { + KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, REGION_EVENT, + EnvironmentEdgeManager.currentTimeMillis(), regionEventDesc.toByteArray()); + return new WALEdit().add(kv); + } + + public static RegionEventDescriptor getRegionEventDescriptor(Cell cell) throws IOException { + if (CellUtil.matchingColumn(cell, METAFAMILY, REGION_EVENT)) { + return RegionEventDescriptor.parseFrom(cell.getValue()); + } + return null; + } + /** * Create a compacion WALEdit * @param c diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java index 5735eaa2957..289b6302487 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java @@ -116,7 +116,7 @@ public class TestDistributedLogSplitting { Logger.getLogger("org.apache.hadoop.hbase").setLevel(Level.DEBUG); // test ThreeRSAbort fails under hadoop2 (2.0.2-alpha) if shortcircuit-read (scr) is on. this - // turns it off for this test. TODO: Figure out why scr breaks recovery. + // turns it off for this test. TODO: Figure out why scr breaks recovery. System.setProperty("hbase.tests.use.shortcircuit.reads", "false"); } @@ -176,7 +176,7 @@ public class TestDistributedLogSplitting { // refresh configuration conf = HBaseConfiguration.create(originalConf); } - + @After public void after() throws Exception { try { @@ -191,7 +191,7 @@ public class TestDistributedLogSplitting { ZKUtil.deleteNodeRecursively(TEST_UTIL.getZooKeeperWatcher(), "/hbase"); } } - + @Test (timeout=300000) public void testRecoveredEdits() throws Exception { LOG.info("testRecoveredEdits"); @@ -482,7 +482,7 @@ public class TestDistributedLogSplitting { ht.close(); zkw.close(); } - + @Test(timeout = 300000) public void testMasterStartsUpWithLogReplayWork() throws Exception { LOG.info("testMasterStartsUpWithLogReplayWork"); @@ -704,7 +704,7 @@ public class TestDistributedLogSplitting { this.prepareData(ht, Bytes.toBytes("family"), Bytes.toBytes("c1")); String originalCheckSum = TEST_UTIL.checksumRows(ht); - + // abort RA and trigger replay abortRSAndWaitForRecovery(hrs, zkw, NUM_REGIONS_TO_CREATE); @@ -777,10 +777,10 @@ public class TestDistributedLogSplitting { } makeHLog(hrs.getWAL(), regions, "disableTable", "family", NUM_LOG_LINES, 100, false); makeHLog(hrs.getWAL(), regions, "table", "family", NUM_LOG_LINES, 100); - + LOG.info("Disabling table\n"); TEST_UTIL.getHBaseAdmin().disableTable(TableName.valueOf("disableTable")); - + // abort RS LOG.info("Aborting region server: " + hrs.getServerName()); hrs.abort("testing"); @@ -837,7 +837,7 @@ public class TestDistributedLogSplitting { assertEquals(NUM_LOG_LINES, count); LOG.info("Verify replayed edits"); assertEquals(NUM_LOG_LINES, TEST_UTIL.countRows(ht)); - + // clean up for (HRegionInfo hri : regions) { Path editsdir = @@ -879,7 +879,7 @@ public class TestDistributedLogSplitting { dstRS = rsts.get((i+1) % 
NUM_RS).getRegionServer(); break; } - + slm.markRegionsRecoveringInZK(hrs.getServerName(), regionSet); // move region in order for the region opened in recovering state final HRegionInfo hri = region; @@ -896,7 +896,7 @@ public class TestDistributedLogSplitting { return (sn != null && sn.equals(tmpRS.getServerName())); } }); - + try { byte[] key = region.getStartKey(); if (key == null || key.length == 0) { @@ -955,6 +955,7 @@ public class TestDistributedLogSplitting { "table", "family", NUM_LOG_LINES, 100); new Thread() { + @Override public void run() { waitForCounter(tot_wkr_task_acquired, 0, 1, 1000); for (RegionServerThread rst : rsts) { @@ -1145,7 +1146,7 @@ public class TestDistributedLogSplitting { assertTrue(isMetaRegionInRecovery); master.getMasterFileSystem().splitMetaLog(hrs.getServerName()); - + isMetaRegionInRecovery = false; recoveringRegions = zkw.getRecoverableZooKeeper().getChildren(zkw.recoveringRegionsZNode, false); @@ -1317,7 +1318,7 @@ public class TestDistributedLogSplitting { WALEdit e = new WALEdit(); value++; e.add(new KeyValue(row, family, qualifier, timeStamp, Bytes.toBytes(value))); - hrs.getWAL().append(curRegionInfo, TableName.valueOf(tableName), e, + hrs.getWAL().append(curRegionInfo, TableName.valueOf(tableName), e, System.currentTimeMillis(), htd, sequenceId); } hrs.getWAL().sync(); @@ -1325,7 +1326,7 @@ public class TestDistributedLogSplitting { // wait for abort completes this.abortRSAndWaitForRecovery(hrs, zkw, NUM_REGIONS_TO_CREATE); - + // verify we got the last value LOG.info("Verification Starts..."); Get g = new Get(row); @@ -1337,7 +1338,7 @@ public class TestDistributedLogSplitting { LOG.info("Verification after flush..."); TEST_UTIL.getHBaseAdmin().flush(tableName); TEST_UTIL.getHBaseAdmin().compact(tableName); - + // wait for compaction completes TEST_UTIL.waitFor(30000, 200, new Waiter.Predicate() { @Override @@ -1356,7 +1357,7 @@ public class TestDistributedLogSplitting { return installTable(zkw, tname, fname, nrs, 0); } - HTable installTable(ZooKeeperWatcher zkw, String tname, String fname, int nrs, + HTable installTable(ZooKeeperWatcher zkw, String tname, String fname, int nrs, int existingRegions) throws Exception { // Create a table with regions TableName table = TableName.valueOf(tname); @@ -1497,8 +1498,11 @@ public class TestDistributedLogSplitting { throws IOException { int count = 0; HLog.Reader in = HLogFactory.createReader(fs, log, conf); - while (in.next() != null) { - count++; + HLog.Entry e; + while ((e = in.next()) != null) { + if (!WALEdit.isMetaEditFamily(e.getEdit().getKeyValues().get(0))) { + count++; + } } return count; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 3b98533d52c..9fa430f9c12 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -34,8 +34,10 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; import static org.mockito.Matchers.anyLong; import static org.mockito.Matchers.argThat; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; @@ -86,6 +88,7 @@ import 
org.apache.hadoop.hbase.MultithreadedTestUtil; import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread; import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread; import org.apache.hadoop.hbase.NotServingRegionException; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.Append; @@ -114,8 +117,10 @@ import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.StoreDescriptor; import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; import org.apache.hadoop.hbase.regionserver.HRegion.RowLock; import org.apache.hadoop.hbase.regionserver.TestStore.FaultyFileSystem; @@ -141,6 +146,7 @@ import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.mockito.ArgumentCaptor; import org.mockito.ArgumentMatcher; import org.mockito.Mockito; @@ -5481,6 +5487,136 @@ public class TestHRegion { this.region = null; } + @Test + @SuppressWarnings("unchecked") + public void testOpenRegionWrittenToWAL() throws Exception { + final ServerName serverName = ServerName.valueOf("testOpenRegionWrittenToWAL", 100, 42); + final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName)); + + HTableDescriptor htd + = new HTableDescriptor(TableName.valueOf("testOpenRegionWrittenToWAL")); + htd.addFamily(new HColumnDescriptor(fam1)); + htd.addFamily(new HColumnDescriptor(fam2)); + + HRegionInfo hri = new HRegionInfo(htd.getTableName(), + HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY); + + // open the region w/o rss and log and flush some files + HRegion region = + HRegion.createHRegion(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL + .getConfiguration(), htd); + assertNotNull(region); + + // create a file in fam1 for the region before opening in OpenRegionHandler + region.put(new Put(Bytes.toBytes("a")).add(fam1, fam1, fam1)); + region.flushcache(); + region.close(); + + ArgumentCaptor editCaptor = ArgumentCaptor.forClass(WALEdit.class); + + // capture appendNoSync() calls + HLog log = mock(HLog.class); + when(rss.getWAL((HRegionInfo) any())).thenReturn(log); + + try { + region = HRegion.openHRegion(hri, htd, rss.getWAL(hri), + TEST_UTIL.getConfiguration(), rss, null); + + verify(log, times(1)).appendNoSync((HTableDescriptor)any(), (HRegionInfo)any(), (HLogKey)any() + , editCaptor.capture(), (AtomicLong)any(), anyBoolean(), (List)any()); + + WALEdit edit = editCaptor.getValue(); + assertNotNull(edit); + assertNotNull(edit.getKeyValues()); + assertEquals(1, edit.getKeyValues().size()); + RegionEventDescriptor desc = WALEdit.getRegionEventDescriptor(edit.getKeyValues().get(0)); + assertNotNull(desc); + + LOG.info("RegionEventDescriptor from WAL: " + desc); + + assertEquals(RegionEventDescriptor.EventType.REGION_OPEN, desc.getEventType()); + assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getName())); + 
assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(), + hri.getEncodedNameAsBytes())); + assertTrue(desc.getLogSequenceNumber() > 0); + assertEquals(serverName, ProtobufUtil.toServerName(desc.getServer())); + assertEquals(2, desc.getStoresCount()); + + StoreDescriptor store = desc.getStores(0); + assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam1)); + assertEquals(store.getStoreHomeDir(), Bytes.toString(fam1)); + assertEquals(1, store.getStoreFileCount()); // 1 store file + assertFalse(store.getStoreFile(0).contains("/")); // ensure path is relative + + store = desc.getStores(1); + assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam2)); + assertEquals(store.getStoreHomeDir(), Bytes.toString(fam2)); + assertEquals(0, store.getStoreFileCount()); // no store files + + } finally { + HRegion.closeHRegion(region); + } + } + + @Test + @SuppressWarnings("unchecked") + public void testCloseRegionWrittenToWAL() throws Exception { + final ServerName serverName = ServerName.valueOf("testCloseRegionWrittenToWAL", 100, 42); + final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName)); + + HTableDescriptor htd + = new HTableDescriptor(TableName.valueOf("testCloseRegionWrittenToWAL")); + htd.addFamily(new HColumnDescriptor(fam1)); + htd.addFamily(new HColumnDescriptor(fam2)); + + HRegionInfo hri = new HRegionInfo(htd.getTableName(), + HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY); + + ArgumentCaptor editCaptor = ArgumentCaptor.forClass(WALEdit.class); + + // capture appendNoSync() calls + HLog log = mock(HLog.class); + when(rss.getWAL((HRegionInfo) any())).thenReturn(log); + + // open a region first so that it can be closed later + region = HRegion.openHRegion(hri, htd, rss.getWAL(hri), + TEST_UTIL.getConfiguration(), rss, null); + + // close the region + region.close(false); + + // 2 times, one for region open, the other for region close + verify(log, times(2)).appendNoSync((HTableDescriptor)any(), (HRegionInfo)any(), (HLogKey)any(), + editCaptor.capture(), (AtomicLong)any(), anyBoolean(), (List)any()); + + WALEdit edit = editCaptor.getAllValues().get(1); + assertNotNull(edit); + assertNotNull(edit.getKeyValues()); + assertEquals(1, edit.getKeyValues().size()); + RegionEventDescriptor desc = WALEdit.getRegionEventDescriptor(edit.getKeyValues().get(0)); + assertNotNull(desc); + + LOG.info("RegionEventDescriptor from WAL: " + desc); + + assertEquals(RegionEventDescriptor.EventType.REGION_CLOSE, desc.getEventType()); + assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getName())); + assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(), + hri.getEncodedNameAsBytes())); + assertTrue(desc.getLogSequenceNumber() > 0); + assertEquals(serverName, ProtobufUtil.toServerName(desc.getServer())); + assertEquals(2, desc.getStoresCount()); + + StoreDescriptor store = desc.getStores(0); + assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam1)); + assertEquals(store.getStoreHomeDir(), Bytes.toString(fam1)); + assertEquals(0, store.getStoreFileCount()); // no store files + + store = desc.getStores(1); + assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam2)); + assertEquals(store.getStoreHomeDir(), Bytes.toString(fam2)); + assertEquals(0, store.getStoreFileCount()); // no store files + } + private static HRegion initHRegion(byte[] tableName, String callingMethod, byte[]...
families) throws IOException { return initHRegion(tableName, callingMethod, HBaseConfiguration.create(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java index ea368df2c79..0edad8bfdc7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver.wal; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import static org.mockito.Mockito.when; import java.io.IOException; import java.security.PrivilegedExceptionAction; @@ -100,7 +101,7 @@ public class TestWALReplay { private FileSystem fs; private Configuration conf; private RecoveryMode mode; - + @BeforeClass public static void setUpBeforeClass() throws Exception { @@ -131,7 +132,7 @@ public class TestWALReplay { if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) { TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true); } - this.mode = (conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false) ? + this.mode = (conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false) ? RecoveryMode.LOG_REPLAY : RecoveryMode.LOG_SPLITTING); } @@ -152,7 +153,7 @@ public class TestWALReplay { } /** - * + * * @throws Exception */ @Test @@ -354,6 +355,7 @@ public class TestWALReplay { User user = HBaseTestingUtility.getDifferentUser(newConf, tableName.getNameAsString()); user.runAs(new PrivilegedExceptionAction() { + @Override public Object run() throws Exception { runWALSplit(newConf); HLog wal2 = createWAL(newConf); @@ -425,6 +427,7 @@ public class TestWALReplay { User user = HBaseTestingUtility.getDifferentUser(newConf, tableName.getNameAsString()); user.runAs(new PrivilegedExceptionAction() { + @Override public Object run() throws Exception { runWALSplit(newConf); HLog wal2 = createWAL(newConf); @@ -518,6 +521,7 @@ public class TestWALReplay { User user = HBaseTestingUtility.getDifferentUser(newConf, tableName.getNameAsString()); user.runAs(new PrivilegedExceptionAction() { + @Override public Object run() throws Exception { runWALSplit(newConf); FileSystem newFS = FileSystem.get(newConf); @@ -669,6 +673,7 @@ public class TestWALReplay { HLog wal = createWAL(this.conf); RegionServerServices rsServices = Mockito.mock(RegionServerServices.class); Mockito.doReturn(false).when(rsServices).isAborted(); + when(rsServices.getServerName()).thenReturn(ServerName.valueOf("foo", 10, 10)); Configuration customConf = new Configuration(this.conf); customConf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY, CustomStoreFlusher.class.getName()); @@ -802,6 +807,7 @@ public class TestWALReplay { User user = HBaseTestingUtility.getDifferentUser(newConf, ".replay.wal.secondtime"); user.runAs(new PrivilegedExceptionAction() { + @Override public Object run() throws Exception { runWALSplit(newConf); FileSystem newFS = FileSystem.get(newConf); @@ -813,6 +819,7 @@ public class TestWALReplay { try { final HRegion region = new HRegion(basedir, newWal, newFS, newConf, hri, htd, null) { + @Override protected FlushResult internalFlushcache( final HLog wal, final long myseqid, MonitoredTask status) throws IOException { @@ -886,7 +893,7 @@ public class TestWALReplay { for (FileStatus fileStatus : listStatus1) { editCount = 
Integer.parseInt(fileStatus.getPath().getName()); } - // The sequence number should be same + // The sequence number should be same assertEquals( "The sequence number of the recoverd.edits and the current edit seq should be same", lastestSeqNumber, editCount); @@ -914,7 +921,7 @@ public class TestWALReplay { htd.addFamily(a); return htd; } - + private MockHLog createMockWAL(Configuration conf) throws IOException { MockHLog wal = new MockHLog(FileSystem.get(conf), hbaseRootDir, logName, conf); // Set down maximum recovery so we dfsclient doesn't linger retrying something @@ -940,7 +947,7 @@ public class TestWALReplay { @Override public void requestDelayedFlush(HRegion region, long when) { // TODO Auto-generated method stub - + } @Override @@ -1021,7 +1028,7 @@ public class TestWALReplay { * @throws IOException */ private HLog createWAL(final Configuration c) throws IOException { - HLog wal = HLogFactory.createHLog(FileSystem.get(c), + HLog wal = HLogFactory.createHLog(FileSystem.get(c), hbaseRootDir, logName, c); // Set down maximum recovery so we dfsclient doesn't linger retrying something // long gone.