From 4cc464dae1f2f70e5538bf7aaf91a66c853c447e Mon Sep 17 00:00:00 2001 From: Matteo Bertozzi Date: Thu, 9 Apr 2015 21:21:18 +0100 Subject: [PATCH] HBASE-13209 Procedure V2 - master Add/Modify/Delete Column Family (Stephen Yuan Jiang) --- .../generated/MasterProcedureProtos.java | 3951 ++++++++++++++++- .../src/main/protobuf/MasterProcedure.proto | 46 + .../apache/hadoop/hbase/master/HMaster.java | 30 +- .../handler/TableDeleteFamilyHandler.java | 6 +- .../procedure/AddColumnFamilyProcedure.java | 407 ++ .../DeleteColumnFamilyProcedure.java | 439 ++ .../ModifyColumnFamilyProcedure.java | 382 ++ .../hbase/master/TestTableLockManager.java | 31 - .../handler/TestTableDeleteFamilyHandler.java | 128 +- .../TestTableDescriptorModification.java | 124 +- .../MasterProcedureTestingUtility.java | 33 + .../TestAddColumnFamilyProcedure.java | 245 + .../TestDeleteColumnFamilyProcedure.java | 301 ++ .../TestModifyColumnFamilyProcedure.java | 237 + 14 files changed, 6280 insertions(+), 80 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAddColumnFamilyProcedure.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyColumnFamilyProcedure.java diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java index 98260c1f95f..4713a0a3865 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java @@ -371,6 +371,342 @@ public final class MasterProcedureProtos { // @@protoc_insertion_point(enum_scope:DeleteTableState) } + /** + * Protobuf enum {@code AddColumnFamilyState} + */ + public enum AddColumnFamilyState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * ADD_COLUMN_FAMILY_PREPARE = 1; + */ + ADD_COLUMN_FAMILY_PREPARE(0, 1), + /** + * ADD_COLUMN_FAMILY_PRE_OPERATION = 2; + */ + ADD_COLUMN_FAMILY_PRE_OPERATION(1, 2), + /** + * ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3; + */ + ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR(2, 3), + /** + * ADD_COLUMN_FAMILY_POST_OPERATION = 4; + */ + ADD_COLUMN_FAMILY_POST_OPERATION(3, 4), + /** + * ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 5; + */ + ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS(4, 5), + ; + + /** + * ADD_COLUMN_FAMILY_PREPARE = 1; + */ + public static final int ADD_COLUMN_FAMILY_PREPARE_VALUE = 1; + /** + * ADD_COLUMN_FAMILY_PRE_OPERATION = 2; + */ + public static final int ADD_COLUMN_FAMILY_PRE_OPERATION_VALUE = 2; + /** + * ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3; + */ + public static final int ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR_VALUE = 3; + /** + * ADD_COLUMN_FAMILY_POST_OPERATION = 4; + */ + public static final int ADD_COLUMN_FAMILY_POST_OPERATION_VALUE = 4; + /** + * ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 5; + */ + public static final int 
ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS_VALUE = 5; + + + public final int getNumber() { return value; } + + public static AddColumnFamilyState valueOf(int value) { + switch (value) { + case 1: return ADD_COLUMN_FAMILY_PREPARE; + case 2: return ADD_COLUMN_FAMILY_PRE_OPERATION; + case 3: return ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR; + case 4: return ADD_COLUMN_FAMILY_POST_OPERATION; + case 5: return ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public AddColumnFamilyState findValueByNumber(int number) { + return AddColumnFamilyState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(3); + } + + private static final AddColumnFamilyState[] VALUES = values(); + + public static AddColumnFamilyState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private AddColumnFamilyState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:AddColumnFamilyState) + } + + /** + * Protobuf enum {@code ModifyColumnFamilyState} + */ + public enum ModifyColumnFamilyState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * MODIFY_COLUMN_FAMILY_PREPARE = 1; + */ + MODIFY_COLUMN_FAMILY_PREPARE(0, 1), + /** + * MODIFY_COLUMN_FAMILY_PRE_OPERATION = 2; + */ + MODIFY_COLUMN_FAMILY_PRE_OPERATION(1, 2), + /** + * MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3; + */ + MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR(2, 3), + /** + * MODIFY_COLUMN_FAMILY_POST_OPERATION = 4; + */ + MODIFY_COLUMN_FAMILY_POST_OPERATION(3, 4), + /** + * MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 5; + */ + MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS(4, 5), + ; + + /** + * MODIFY_COLUMN_FAMILY_PREPARE = 1; + */ + public static final int MODIFY_COLUMN_FAMILY_PREPARE_VALUE = 1; + /** + * MODIFY_COLUMN_FAMILY_PRE_OPERATION = 2; + */ + public static final int MODIFY_COLUMN_FAMILY_PRE_OPERATION_VALUE = 2; + /** + * MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3; + */ + public static final int MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR_VALUE = 3; + /** + * MODIFY_COLUMN_FAMILY_POST_OPERATION = 4; + */ + public static final int MODIFY_COLUMN_FAMILY_POST_OPERATION_VALUE = 4; + /** + * MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 5; + */ + public static final int MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS_VALUE = 5; + + + public final int getNumber() { return value; } + + public static ModifyColumnFamilyState valueOf(int value) { + switch (value) { + case 1: return MODIFY_COLUMN_FAMILY_PREPARE; + case 2: return MODIFY_COLUMN_FAMILY_PRE_OPERATION; + case 3: return MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR; + case 4: return 
MODIFY_COLUMN_FAMILY_POST_OPERATION; + case 5: return MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ModifyColumnFamilyState findValueByNumber(int number) { + return ModifyColumnFamilyState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(4); + } + + private static final ModifyColumnFamilyState[] VALUES = values(); + + public static ModifyColumnFamilyState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private ModifyColumnFamilyState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:ModifyColumnFamilyState) + } + + /** + * Protobuf enum {@code DeleteColumnFamilyState} + */ + public enum DeleteColumnFamilyState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * DELETE_COLUMN_FAMILY_PREPARE = 1; + */ + DELETE_COLUMN_FAMILY_PREPARE(0, 1), + /** + * DELETE_COLUMN_FAMILY_PRE_OPERATION = 2; + */ + DELETE_COLUMN_FAMILY_PRE_OPERATION(1, 2), + /** + * DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3; + */ + DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR(2, 3), + /** + * DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT = 4; + */ + DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT(3, 4), + /** + * DELETE_COLUMN_FAMILY_POST_OPERATION = 5; + */ + DELETE_COLUMN_FAMILY_POST_OPERATION(4, 5), + /** + * DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 6; + */ + DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS(5, 6), + ; + + /** + * DELETE_COLUMN_FAMILY_PREPARE = 1; + */ + public static final int DELETE_COLUMN_FAMILY_PREPARE_VALUE = 1; + /** + * DELETE_COLUMN_FAMILY_PRE_OPERATION = 2; + */ + public static final int DELETE_COLUMN_FAMILY_PRE_OPERATION_VALUE = 2; + /** + * DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3; + */ + public static final int DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR_VALUE = 3; + /** + * DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT = 4; + */ + public static final int DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT_VALUE = 4; + /** + * DELETE_COLUMN_FAMILY_POST_OPERATION = 5; + */ + public static final int DELETE_COLUMN_FAMILY_POST_OPERATION_VALUE = 5; + /** + * DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 6; + */ + public static final int DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS_VALUE = 6; + + + public final int getNumber() { return value; } + + public static DeleteColumnFamilyState valueOf(int value) { + switch (value) { + case 1: return DELETE_COLUMN_FAMILY_PREPARE; + case 2: return DELETE_COLUMN_FAMILY_PRE_OPERATION; + case 3: return DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR; + case 4: return DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT; + case 5: return DELETE_COLUMN_FAMILY_POST_OPERATION; + case 
6: return DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public DeleteColumnFamilyState findValueByNumber(int number) { + return DeleteColumnFamilyState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(5); + } + + private static final DeleteColumnFamilyState[] VALUES = values(); + + public static DeleteColumnFamilyState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private DeleteColumnFamilyState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:DeleteColumnFamilyState) + } + public interface CreateTableStateDataOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -3776,6 +4112,3514 @@ public final class MasterProcedureProtos { // @@protoc_insertion_point(class_scope:DeleteTableStateData) } + public interface AddColumnFamilyStateDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .UserInformation user_info = 1; + /** + * required .UserInformation user_info = 1; + */ + boolean hasUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder(); + + // required .TableName table_name = 2; + /** + * required .TableName table_name = 2; + */ + boolean hasTableName(); + /** + * required .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * required .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + + // required .ColumnFamilySchema columnfamily_schema = 3; + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + boolean hasColumnfamilySchema(); + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnfamilySchema(); + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnfamilySchemaOrBuilder(); + + // optional .TableSchema unmodified_table_schema = 4; + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + boolean hasUnmodifiedTableSchema(); + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema(); + /** + * 
optional .TableSchema unmodified_table_schema = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder(); + } + /** + * Protobuf type {@code AddColumnFamilyStateData} + */ + public static final class AddColumnFamilyStateData extends + com.google.protobuf.GeneratedMessage + implements AddColumnFamilyStateDataOrBuilder { + // Use AddColumnFamilyStateData.newBuilder() to construct. + private AddColumnFamilyStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AddColumnFamilyStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AddColumnFamilyStateData defaultInstance; + public static AddColumnFamilyStateData getDefaultInstance() { + return defaultInstance; + } + + public AddColumnFamilyStateData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AddColumnFamilyStateData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = userInfo_.toBuilder(); + } + userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(userInfo_); + userInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 26: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = columnfamilySchema_.toBuilder(); + } + columnfamilySchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(columnfamilySchema_); + columnfamilySchema_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + case 34: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + subBuilder = unmodifiedTableSchema_.toBuilder(); + } + unmodifiedTableSchema_ = 
input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(unmodifiedTableSchema_); + unmodifiedTableSchema_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000008; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_AddColumnFamilyStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_AddColumnFamilyStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AddColumnFamilyStateData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AddColumnFamilyStateData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .UserInformation user_info = 1; + public static final int USER_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + return userInfo_; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + return userInfo_; + } + + // required .TableName table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * required .TableName table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + // required .ColumnFamilySchema columnfamily_schema = 3; + public static final int COLUMNFAMILY_SCHEMA_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema columnfamilySchema_; + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + 
public boolean hasColumnfamilySchema() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnfamilySchema() { + return columnfamilySchema_; + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnfamilySchemaOrBuilder() { + return columnfamilySchema_; + } + + // optional .TableSchema unmodified_table_schema = 4; + public static final int UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_; + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public boolean hasUnmodifiedTableSchema() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() { + return unmodifiedTableSchema_; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() { + return unmodifiedTableSchema_; + } + + private void initFields() { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasUserInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasColumnfamilySchema()) { + memoizedIsInitialized = 0; + return false; + } + if (!getUserInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getColumnfamilySchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (hasUnmodifiedTableSchema()) { + if (!getUnmodifiedTableSchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, columnfamilySchema_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, unmodifiedTableSchema_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + 
.computeMessageSize(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, columnfamilySchema_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, unmodifiedTableSchema_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData) obj; + + boolean result = true; + result = result && (hasUserInfo() == other.hasUserInfo()); + if (hasUserInfo()) { + result = result && getUserInfo() + .equals(other.getUserInfo()); + } + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasColumnfamilySchema() == other.hasColumnfamilySchema()); + if (hasColumnfamilySchema()) { + result = result && getColumnfamilySchema() + .equals(other.getColumnfamilySchema()); + } + result = result && (hasUnmodifiedTableSchema() == other.hasUnmodifiedTableSchema()); + if (hasUnmodifiedTableSchema()) { + result = result && getUnmodifiedTableSchema() + .equals(other.getUnmodifiedTableSchema()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasUserInfo()) { + hash = (37 * hash) + USER_INFO_FIELD_NUMBER; + hash = (53 * hash) + getUserInfo().hashCode(); + } + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasColumnfamilySchema()) { + hash = (37 * hash) + COLUMNFAMILY_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getColumnfamilySchema().hashCode(); + } + if (hasUnmodifiedTableSchema()) { + hash = (37 * hash) + UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getUnmodifiedTableSchema().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code AddColumnFamilyStateData} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_AddColumnFamilyStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_AddColumnFamilyStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getUserInfoFieldBuilder(); + getTableNameFieldBuilder(); + getColumnfamilySchemaFieldBuilder(); + getUnmodifiedTableSchemaFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (columnfamilySchemaBuilder_ == null) { + columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); + } else { + columnfamilySchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } else { + unmodifiedTableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_AddColumnFamilyStateData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (userInfoBuilder_ == null) { + result.userInfo_ = userInfo_; + } else { + result.userInfo_ = userInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } 
else { + result.tableName_ = tableNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (columnfamilySchemaBuilder_ == null) { + result.columnfamilySchema_ = columnfamilySchema_; + } else { + result.columnfamilySchema_ = columnfamilySchemaBuilder_.build(); + } + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (unmodifiedTableSchemaBuilder_ == null) { + result.unmodifiedTableSchema_ = unmodifiedTableSchema_; + } else { + result.unmodifiedTableSchema_ = unmodifiedTableSchemaBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.getDefaultInstance()) return this; + if (other.hasUserInfo()) { + mergeUserInfo(other.getUserInfo()); + } + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + if (other.hasColumnfamilySchema()) { + mergeColumnfamilySchema(other.getColumnfamilySchema()); + } + if (other.hasUnmodifiedTableSchema()) { + mergeUnmodifiedTableSchema(other.getUnmodifiedTableSchema()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasUserInfo()) { + + return false; + } + if (!hasTableName()) { + + return false; + } + if (!hasColumnfamilySchema()) { + + return false; + } + if (!getUserInfo().isInitialized()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } + if (!getColumnfamilySchema().isInitialized()) { + + return false; + } + if (hasUnmodifiedTableSchema()) { + if (!getUnmodifiedTableSchema().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .UserInformation user_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_; + /** + * required .UserInformation user_info = 1; + */ + 
public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + if (userInfoBuilder_ == null) { + return userInfo_; + } else { + return userInfoBuilder_.getMessage(); + } + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + userInfo_ = value; + onChanged(); + } else { + userInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo( + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) { + if (userInfoBuilder_ == null) { + userInfo_ = builderForValue.build(); + onChanged(); + } else { + userInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) { + userInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial(); + } else { + userInfo_ = value; + } + onChanged(); + } else { + userInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder clearUserInfo() { + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + onChanged(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getUserInfoFieldBuilder().getBuilder(); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + if (userInfoBuilder_ != null) { + return userInfoBuilder_.getMessageOrBuilder(); + } else { + return userInfo_; + } + } + /** + * required .UserInformation user_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> + getUserInfoFieldBuilder() { + if (userInfoBuilder_ == null) { + userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>( + userInfo_, + getParentForChildren(), + isClean()); + userInfo_ = null; + } + return userInfoBuilder_; + } + + // required .TableName table_name = 2; + private 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * required .TableName table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * required .TableName table_name = 2; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * required .TableName table_name = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = 
new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // required .ColumnFamilySchema columnfamily_schema = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder> columnfamilySchemaBuilder_; + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public boolean hasColumnfamilySchema() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnfamilySchema() { + if (columnfamilySchemaBuilder_ == null) { + return columnfamilySchema_; + } else { + return columnfamilySchemaBuilder_.getMessage(); + } + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public Builder setColumnfamilySchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema value) { + if (columnfamilySchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + columnfamilySchema_ = value; + onChanged(); + } else { + columnfamilySchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public Builder setColumnfamilySchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder builderForValue) { + if (columnfamilySchemaBuilder_ == null) { + columnfamilySchema_ = builderForValue.build(); + onChanged(); + } else { + columnfamilySchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public Builder mergeColumnfamilySchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema value) { + if (columnfamilySchemaBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + columnfamilySchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance()) { + columnfamilySchema_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.newBuilder(columnfamilySchema_).mergeFrom(value).buildPartial(); + } else { + columnfamilySchema_ = value; + } + onChanged(); + } else { + columnfamilySchemaBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public Builder clearColumnfamilySchema() { + if (columnfamilySchemaBuilder_ == null) { + columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); + onChanged(); + } else { + columnfamilySchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * required .ColumnFamilySchema 
columnfamily_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder getColumnfamilySchemaBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getColumnfamilySchemaFieldBuilder().getBuilder(); + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnfamilySchemaOrBuilder() { + if (columnfamilySchemaBuilder_ != null) { + return columnfamilySchemaBuilder_.getMessageOrBuilder(); + } else { + return columnfamilySchema_; + } + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder> + getColumnfamilySchemaFieldBuilder() { + if (columnfamilySchemaBuilder_ == null) { + columnfamilySchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder>( + columnfamilySchema_, + getParentForChildren(), + isClean()); + columnfamilySchema_ = null; + } + return columnfamilySchemaBuilder_; + } + + // optional .TableSchema unmodified_table_schema = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> unmodifiedTableSchemaBuilder_; + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public boolean hasUnmodifiedTableSchema() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() { + if (unmodifiedTableSchemaBuilder_ == null) { + return unmodifiedTableSchema_; + } else { + return unmodifiedTableSchemaBuilder_.getMessage(); + } + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder setUnmodifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (unmodifiedTableSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + unmodifiedTableSchema_ = value; + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder setUnmodifiedTableSchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = builderForValue.build(); + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder 
mergeUnmodifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (unmodifiedTableSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + unmodifiedTableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) { + unmodifiedTableSchema_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(unmodifiedTableSchema_).mergeFrom(value).buildPartial(); + } else { + unmodifiedTableSchema_ = value; + } + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder clearUnmodifiedTableSchema() { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getUnmodifiedTableSchemaBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getUnmodifiedTableSchemaFieldBuilder().getBuilder(); + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() { + if (unmodifiedTableSchemaBuilder_ != null) { + return unmodifiedTableSchemaBuilder_.getMessageOrBuilder(); + } else { + return unmodifiedTableSchema_; + } + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> + getUnmodifiedTableSchemaFieldBuilder() { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>( + unmodifiedTableSchema_, + getParentForChildren(), + isClean()); + unmodifiedTableSchema_ = null; + } + return unmodifiedTableSchemaBuilder_; + } + + // @@protoc_insertion_point(builder_scope:AddColumnFamilyStateData) + } + + static { + defaultInstance = new AddColumnFamilyStateData(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:AddColumnFamilyStateData) + } + + public interface ModifyColumnFamilyStateDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .UserInformation user_info = 1; + /** + * required .UserInformation user_info = 1; + */ + boolean hasUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder(); + + // required .TableName table_name = 2; + /** + * required .TableName table_name = 2; + */ + boolean hasTableName(); + /** + * required .TableName table_name = 2; + 
*/ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * required .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + + // required .ColumnFamilySchema columnfamily_schema = 3; + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + boolean hasColumnfamilySchema(); + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnfamilySchema(); + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnfamilySchemaOrBuilder(); + + // optional .TableSchema unmodified_table_schema = 4; + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + boolean hasUnmodifiedTableSchema(); + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema(); + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder(); + } + /** + * Protobuf type {@code ModifyColumnFamilyStateData} + */ + public static final class ModifyColumnFamilyStateData extends + com.google.protobuf.GeneratedMessage + implements ModifyColumnFamilyStateDataOrBuilder { + // Use ModifyColumnFamilyStateData.newBuilder() to construct. + private ModifyColumnFamilyStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ModifyColumnFamilyStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ModifyColumnFamilyStateData defaultInstance; + public static ModifyColumnFamilyStateData getDefaultInstance() { + return defaultInstance; + } + + public ModifyColumnFamilyStateData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ModifyColumnFamilyStateData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = userInfo_.toBuilder(); + } + userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(userInfo_); + userInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 
0x00000002) == 0x00000002)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 26: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = columnfamilySchema_.toBuilder(); + } + columnfamilySchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(columnfamilySchema_); + columnfamilySchema_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + case 34: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + subBuilder = unmodifiedTableSchema_.toBuilder(); + } + unmodifiedTableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(unmodifiedTableSchema_); + unmodifiedTableSchema_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000008; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyColumnFamilyStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyColumnFamilyStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ModifyColumnFamilyStateData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ModifyColumnFamilyStateData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .UserInformation user_info = 1; + public static final int USER_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + return userInfo_; + } + /** + * required 
.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + return userInfo_; + } + + // required .TableName table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * required .TableName table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + // required .ColumnFamilySchema columnfamily_schema = 3; + public static final int COLUMNFAMILY_SCHEMA_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema columnfamilySchema_; + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public boolean hasColumnfamilySchema() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnfamilySchema() { + return columnfamilySchema_; + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnfamilySchemaOrBuilder() { + return columnfamilySchema_; + } + + // optional .TableSchema unmodified_table_schema = 4; + public static final int UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_; + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public boolean hasUnmodifiedTableSchema() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() { + return unmodifiedTableSchema_; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() { + return unmodifiedTableSchema_; + } + + private void initFields() { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasUserInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasColumnfamilySchema()) { + memoizedIsInitialized = 0; + return false; + } + if (!getUserInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + 
memoizedIsInitialized = 0; + return false; + } + if (!getColumnfamilySchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (hasUnmodifiedTableSchema()) { + if (!getUnmodifiedTableSchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, columnfamilySchema_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, unmodifiedTableSchema_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, columnfamilySchema_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, unmodifiedTableSchema_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData) obj; + + boolean result = true; + result = result && (hasUserInfo() == other.hasUserInfo()); + if (hasUserInfo()) { + result = result && getUserInfo() + .equals(other.getUserInfo()); + } + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasColumnfamilySchema() == other.hasColumnfamilySchema()); + if (hasColumnfamilySchema()) { + result = result && getColumnfamilySchema() + .equals(other.getColumnfamilySchema()); + } + result = result && (hasUnmodifiedTableSchema() == other.hasUnmodifiedTableSchema()); + if (hasUnmodifiedTableSchema()) { + result = result && getUnmodifiedTableSchema() + .equals(other.getUnmodifiedTableSchema()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasUserInfo()) { + hash = (37 * hash) + 
USER_INFO_FIELD_NUMBER; + hash = (53 * hash) + getUserInfo().hashCode(); + } + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasColumnfamilySchema()) { + hash = (37 * hash) + COLUMNFAMILY_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getColumnfamilySchema().hashCode(); + } + if (hasUnmodifiedTableSchema()) { + hash = (37 * hash) + UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getUnmodifiedTableSchema().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder 
newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ModifyColumnFamilyStateData} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyColumnFamilyStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyColumnFamilyStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getUserInfoFieldBuilder(); + getTableNameFieldBuilder(); + getColumnfamilySchemaFieldBuilder(); + getUnmodifiedTableSchemaFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (columnfamilySchemaBuilder_ == null) { + columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); + } else { + columnfamilySchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } else { + unmodifiedTableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyColumnFamilyStateData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData getDefaultInstanceForType() { + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (userInfoBuilder_ == null) { + result.userInfo_ = userInfo_; + } else { + result.userInfo_ = userInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (columnfamilySchemaBuilder_ == null) { + result.columnfamilySchema_ = columnfamilySchema_; + } else { + result.columnfamilySchema_ = columnfamilySchemaBuilder_.build(); + } + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (unmodifiedTableSchemaBuilder_ == null) { + result.unmodifiedTableSchema_ = unmodifiedTableSchema_; + } else { + result.unmodifiedTableSchema_ = unmodifiedTableSchemaBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData.getDefaultInstance()) return this; + if (other.hasUserInfo()) { + mergeUserInfo(other.getUserInfo()); + } + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + if (other.hasColumnfamilySchema()) { + mergeColumnfamilySchema(other.getColumnfamilySchema()); + } + if (other.hasUnmodifiedTableSchema()) { + mergeUnmodifiedTableSchema(other.getUnmodifiedTableSchema()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasUserInfo()) { + + return false; + } + if (!hasTableName()) { + + return false; + } + if (!hasColumnfamilySchema()) { + + return false; + } + if (!getUserInfo().isInitialized()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } + if (!getColumnfamilySchema().isInitialized()) { + + return false; + } + if (hasUnmodifiedTableSchema()) { + if (!getUnmodifiedTableSchema().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .UserInformation user_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + if (userInfoBuilder_ == null) { + return userInfo_; + } else { + return userInfoBuilder_.getMessage(); + } + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + userInfo_ = value; + onChanged(); + } else { + userInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo( + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) { + if (userInfoBuilder_ == null) { + userInfo_ = builderForValue.build(); + onChanged(); + } else { + userInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) { + userInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial(); + } else { + userInfo_ = value; + } + onChanged(); + } else { + userInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder clearUserInfo() { + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + onChanged(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder 
getUserInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getUserInfoFieldBuilder().getBuilder(); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + if (userInfoBuilder_ != null) { + return userInfoBuilder_.getMessageOrBuilder(); + } else { + return userInfo_; + } + } + /** + * required .UserInformation user_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> + getUserInfoFieldBuilder() { + if (userInfoBuilder_ == null) { + userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>( + userInfo_, + getParentForChildren(), + isClean()); + userInfo_ = null; + } + return userInfoBuilder_; + } + + // required .TableName table_name = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * required .TableName table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * required .TableName table_name = 2; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required 
.TableName table_name = 2; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * required .TableName table_name = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // required .ColumnFamilySchema columnfamily_schema = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder> columnfamilySchemaBuilder_; + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public boolean hasColumnfamilySchema() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnfamilySchema() { + if (columnfamilySchemaBuilder_ == null) { + return columnfamilySchema_; + } else { + return columnfamilySchemaBuilder_.getMessage(); + } + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public Builder setColumnfamilySchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema value) { + if (columnfamilySchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + columnfamilySchema_ = value; + onChanged(); + } else { + columnfamilySchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public Builder setColumnfamilySchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder builderForValue) { + if (columnfamilySchemaBuilder_ == null) { + columnfamilySchema_ = builderForValue.build(); + onChanged(); + } else { + 
columnfamilySchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public Builder mergeColumnfamilySchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema value) { + if (columnfamilySchemaBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + columnfamilySchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance()) { + columnfamilySchema_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.newBuilder(columnfamilySchema_).mergeFrom(value).buildPartial(); + } else { + columnfamilySchema_ = value; + } + onChanged(); + } else { + columnfamilySchemaBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public Builder clearColumnfamilySchema() { + if (columnfamilySchemaBuilder_ == null) { + columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); + onChanged(); + } else { + columnfamilySchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder getColumnfamilySchemaBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getColumnfamilySchemaFieldBuilder().getBuilder(); + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnfamilySchemaOrBuilder() { + if (columnfamilySchemaBuilder_ != null) { + return columnfamilySchemaBuilder_.getMessageOrBuilder(); + } else { + return columnfamilySchema_; + } + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder> + getColumnfamilySchemaFieldBuilder() { + if (columnfamilySchemaBuilder_ == null) { + columnfamilySchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder>( + columnfamilySchema_, + getParentForChildren(), + isClean()); + columnfamilySchema_ = null; + } + return columnfamilySchemaBuilder_; + } + + // optional .TableSchema unmodified_table_schema = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> unmodifiedTableSchemaBuilder_; + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public boolean hasUnmodifiedTableSchema() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * 
optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() { + if (unmodifiedTableSchemaBuilder_ == null) { + return unmodifiedTableSchema_; + } else { + return unmodifiedTableSchemaBuilder_.getMessage(); + } + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder setUnmodifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (unmodifiedTableSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + unmodifiedTableSchema_ = value; + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder setUnmodifiedTableSchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = builderForValue.build(); + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder mergeUnmodifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (unmodifiedTableSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + unmodifiedTableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) { + unmodifiedTableSchema_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(unmodifiedTableSchema_).mergeFrom(value).buildPartial(); + } else { + unmodifiedTableSchema_ = value; + } + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder clearUnmodifiedTableSchema() { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getUnmodifiedTableSchemaBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getUnmodifiedTableSchemaFieldBuilder().getBuilder(); + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() { + if (unmodifiedTableSchemaBuilder_ != null) { + return unmodifiedTableSchemaBuilder_.getMessageOrBuilder(); + } else { + return unmodifiedTableSchema_; + } + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> + getUnmodifiedTableSchemaFieldBuilder() { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>( + unmodifiedTableSchema_, + getParentForChildren(), + isClean()); + unmodifiedTableSchema_ = null; + } + return unmodifiedTableSchemaBuilder_; + } + + // @@protoc_insertion_point(builder_scope:ModifyColumnFamilyStateData) + } + + static { + defaultInstance = new ModifyColumnFamilyStateData(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ModifyColumnFamilyStateData) + } + + public interface DeleteColumnFamilyStateDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .UserInformation user_info = 1; + /** + * required .UserInformation user_info = 1; + */ + boolean hasUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder(); + + // required .TableName table_name = 2; + /** + * required .TableName table_name = 2; + */ + boolean hasTableName(); + /** + * required .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * required .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + + // required bytes columnfamily_name = 3; + /** + * required bytes columnfamily_name = 3; + */ + boolean hasColumnfamilyName(); + /** + * required bytes columnfamily_name = 3; + */ + com.google.protobuf.ByteString getColumnfamilyName(); + + // optional .TableSchema unmodified_table_schema = 4; + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + boolean hasUnmodifiedTableSchema(); + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema(); + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder(); + } + /** + * Protobuf type {@code DeleteColumnFamilyStateData} + */ + public static final class DeleteColumnFamilyStateData extends + com.google.protobuf.GeneratedMessage + implements DeleteColumnFamilyStateDataOrBuilder { + // Use DeleteColumnFamilyStateData.newBuilder() to construct. 
+ private DeleteColumnFamilyStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private DeleteColumnFamilyStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final DeleteColumnFamilyStateData defaultInstance; + public static DeleteColumnFamilyStateData getDefaultInstance() { + return defaultInstance; + } + + public DeleteColumnFamilyStateData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DeleteColumnFamilyStateData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = userInfo_.toBuilder(); + } + userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(userInfo_); + userInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 26: { + bitField0_ |= 0x00000004; + columnfamilyName_ = input.readBytes(); + break; + } + case 34: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + subBuilder = unmodifiedTableSchema_.toBuilder(); + } + unmodifiedTableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(unmodifiedTableSchema_); + unmodifiedTableSchema_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000008; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteColumnFamilyStateData_descriptor; + } + + 
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteColumnFamilyStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public DeleteColumnFamilyStateData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DeleteColumnFamilyStateData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .UserInformation user_info = 1; + public static final int USER_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + return userInfo_; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + return userInfo_; + } + + // required .TableName table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * required .TableName table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + // required bytes columnfamily_name = 3; + public static final int COLUMNFAMILY_NAME_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString columnfamilyName_; + /** + * required bytes columnfamily_name = 3; + */ + public boolean hasColumnfamilyName() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bytes columnfamily_name = 3; + */ + public com.google.protobuf.ByteString getColumnfamilyName() { + return columnfamilyName_; + } + + // optional .TableSchema unmodified_table_schema = 4; + public static final int UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_; + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public boolean hasUnmodifiedTableSchema() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() { + return unmodifiedTableSchema_; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() { + return unmodifiedTableSchema_; + } + + private void initFields() { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + columnfamilyName_ = com.google.protobuf.ByteString.EMPTY; + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasUserInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasColumnfamilyName()) { + memoizedIsInitialized = 0; + return false; + } + if (!getUserInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (hasUnmodifiedTableSchema()) { + if (!getUnmodifiedTableSchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, columnfamilyName_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, unmodifiedTableSchema_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, columnfamilyName_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, unmodifiedTableSchema_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData) obj; + + boolean result = true; + result = result && (hasUserInfo() == other.hasUserInfo()); + if (hasUserInfo()) { + result = result && 
getUserInfo() + .equals(other.getUserInfo()); + } + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasColumnfamilyName() == other.hasColumnfamilyName()); + if (hasColumnfamilyName()) { + result = result && getColumnfamilyName() + .equals(other.getColumnfamilyName()); + } + result = result && (hasUnmodifiedTableSchema() == other.hasUnmodifiedTableSchema()); + if (hasUnmodifiedTableSchema()) { + result = result && getUnmodifiedTableSchema() + .equals(other.getUnmodifiedTableSchema()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasUserInfo()) { + hash = (37 * hash) + USER_INFO_FIELD_NUMBER; + hash = (53 * hash) + getUserInfo().hashCode(); + } + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasColumnfamilyName()) { + hash = (37 * hash) + COLUMNFAMILY_NAME_FIELD_NUMBER; + hash = (53 * hash) + getColumnfamilyName().hashCode(); + } + if (hasUnmodifiedTableSchema()) { + hash = (37 * hash) + UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getUnmodifiedTableSchema().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code DeleteColumnFamilyStateData} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteColumnFamilyStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteColumnFamilyStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getUserInfoFieldBuilder(); + getTableNameFieldBuilder(); + getUnmodifiedTableSchemaFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + columnfamilyName_ = 
com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } else { + unmodifiedTableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteColumnFamilyStateData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (userInfoBuilder_ == null) { + result.userInfo_ = userInfo_; + } else { + result.userInfo_ = userInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.columnfamilyName_ = columnfamilyName_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (unmodifiedTableSchemaBuilder_ == null) { + result.unmodifiedTableSchema_ = unmodifiedTableSchema_; + } else { + result.unmodifiedTableSchema_ = unmodifiedTableSchemaBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData.getDefaultInstance()) return this; + if (other.hasUserInfo()) { + mergeUserInfo(other.getUserInfo()); + } + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + if (other.hasColumnfamilyName()) { + setColumnfamilyName(other.getColumnfamilyName()); + } + if (other.hasUnmodifiedTableSchema()) { + 
mergeUnmodifiedTableSchema(other.getUnmodifiedTableSchema()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasUserInfo()) { + + return false; + } + if (!hasTableName()) { + + return false; + } + if (!hasColumnfamilyName()) { + + return false; + } + if (!getUserInfo().isInitialized()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } + if (hasUnmodifiedTableSchema()) { + if (!getUnmodifiedTableSchema().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .UserInformation user_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + if (userInfoBuilder_ == null) { + return userInfo_; + } else { + return userInfoBuilder_.getMessage(); + } + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + userInfo_ = value; + onChanged(); + } else { + userInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo( + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) { + if (userInfoBuilder_ == null) { + userInfo_ = builderForValue.build(); + onChanged(); + } else { + userInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) { + userInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial(); + } else { + userInfo_ = value; + } + 
onChanged(); + } else { + userInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder clearUserInfo() { + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + onChanged(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getUserInfoFieldBuilder().getBuilder(); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + if (userInfoBuilder_ != null) { + return userInfoBuilder_.getMessageOrBuilder(); + } else { + return userInfo_; + } + } + /** + * required .UserInformation user_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> + getUserInfoFieldBuilder() { + if (userInfoBuilder_ == null) { + userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>( + userInfo_, + getParentForChildren(), + isClean()); + userInfo_ = null; + } + return userInfoBuilder_; + } + + // required .TableName table_name = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * required .TableName table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * required .TableName table_name = 2; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public 
Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * required .TableName table_name = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // required bytes columnfamily_name = 3; + private com.google.protobuf.ByteString columnfamilyName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes columnfamily_name = 3; + */ + public boolean hasColumnfamilyName() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bytes columnfamily_name = 3; + */ + public com.google.protobuf.ByteString getColumnfamilyName() { + return columnfamilyName_; + } + /** + * required bytes columnfamily_name = 3; + */ + public Builder setColumnfamilyName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + columnfamilyName_ = value; + onChanged(); + return this; + } + /** + * required bytes columnfamily_name = 3; + */ + public Builder clearColumnfamilyName() { + bitField0_ = (bitField0_ & ~0x00000004); + columnfamilyName_ = getDefaultInstance().getColumnfamilyName(); + onChanged(); + return this; + } + + // optional .TableSchema unmodified_table_schema = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> unmodifiedTableSchemaBuilder_; + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public boolean hasUnmodifiedTableSchema() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() { + if (unmodifiedTableSchemaBuilder_ == null) { + return unmodifiedTableSchema_; + } else { + return unmodifiedTableSchemaBuilder_.getMessage(); + } + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder setUnmodifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (unmodifiedTableSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + unmodifiedTableSchema_ = value; + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder setUnmodifiedTableSchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = builderForValue.build(); + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder mergeUnmodifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (unmodifiedTableSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + unmodifiedTableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) { + unmodifiedTableSchema_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(unmodifiedTableSchema_).mergeFrom(value).buildPartial(); + } else { + unmodifiedTableSchema_ = value; + } + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder clearUnmodifiedTableSchema() { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getUnmodifiedTableSchemaBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getUnmodifiedTableSchemaFieldBuilder().getBuilder(); + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() { + if (unmodifiedTableSchemaBuilder_ != null) { + return unmodifiedTableSchemaBuilder_.getMessageOrBuilder(); + } else { + return unmodifiedTableSchema_; + } + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + private 
com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> + getUnmodifiedTableSchemaFieldBuilder() { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>( + unmodifiedTableSchema_, + getParentForChildren(), + isClean()); + unmodifiedTableSchema_ = null; + } + return unmodifiedTableSchemaBuilder_; + } + + // @@protoc_insertion_point(builder_scope:DeleteColumnFamilyStateData) + } + + static { + defaultInstance = new DeleteColumnFamilyStateData(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:DeleteColumnFamilyStateData) + } + private static com.google.protobuf.Descriptors.Descriptor internal_static_CreateTableStateData_descriptor; private static @@ -3791,6 +7635,21 @@ public final class MasterProcedureProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_DeleteTableStateData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_AddColumnFamilyStateData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_AddColumnFamilyStateData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ModifyColumnFamilyStateData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ModifyColumnFamilyStateData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_DeleteColumnFamilyStateData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_DeleteColumnFamilyStateData_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -3812,27 +7671,59 @@ public final class MasterProcedureProtos { "_modify\030\004 \002(\010\"}\n\024DeleteTableStateData\022#\n", "\tuser_info\030\001 \002(\0132\020.UserInformation\022\036\n\nta" + "ble_name\030\002 \002(\0132\n.TableName\022 \n\013region_inf" + - "o\030\003 \003(\0132\013.RegionInfo*\330\001\n\020CreateTableStat" + - "e\022\036\n\032CREATE_TABLE_PRE_OPERATION\020\001\022 \n\034CRE" + - "ATE_TABLE_WRITE_FS_LAYOUT\020\002\022\034\n\030CREATE_TA" + - "BLE_ADD_TO_META\020\003\022\037\n\033CREATE_TABLE_ASSIGN" + - "_REGIONS\020\004\022\"\n\036CREATE_TABLE_UPDATE_DESC_C" + - "ACHE\020\005\022\037\n\033CREATE_TABLE_POST_OPERATION\020\006*" + - "\207\002\n\020ModifyTableState\022\030\n\024MODIFY_TABLE_PRE" + - "PARE\020\001\022\036\n\032MODIFY_TABLE_PRE_OPERATION\020\002\022(", - "\n$MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR\020\003" + - "\022&\n\"MODIFY_TABLE_REMOVE_REPLICA_COLUMN\020\004" + - "\022!\n\035MODIFY_TABLE_DELETE_FS_LAYOUT\020\005\022\037\n\033M" + - "ODIFY_TABLE_POST_OPERATION\020\006\022#\n\037MODIFY_T" + - "ABLE_REOPEN_ALL_REGIONS\020\007*\337\001\n\020DeleteTabl" + - "eState\022\036\n\032DELETE_TABLE_PRE_OPERATION\020\001\022!" 
+ - "\n\035DELETE_TABLE_REMOVE_FROM_META\020\002\022 \n\034DEL" + - "ETE_TABLE_CLEAR_FS_LAYOUT\020\003\022\"\n\036DELETE_TA" + - "BLE_UPDATE_DESC_CACHE\020\004\022!\n\035DELETE_TABLE_" + - "UNASSIGN_REGIONS\020\005\022\037\n\033DELETE_TABLE_POST_", - "OPERATION\020\006BK\n*org.apache.hadoop.hbase.p" + - "rotobuf.generatedB\025MasterProcedureProtos" + - "H\001\210\001\001\240\001\001" + "o\030\003 \003(\0132\013.RegionInfo\"\300\001\n\030AddColumnFamily" + + "StateData\022#\n\tuser_info\030\001 \002(\0132\020.UserInfor" + + "mation\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\0220" + + "\n\023columnfamily_schema\030\003 \002(\0132\023.ColumnFami" + + "lySchema\022-\n\027unmodified_table_schema\030\004 \001(" + + "\0132\014.TableSchema\"\303\001\n\033ModifyColumnFamilySt" + + "ateData\022#\n\tuser_info\030\001 \002(\0132\020.UserInforma" + + "tion\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\0220\n\023", + "columnfamily_schema\030\003 \002(\0132\023.ColumnFamily" + + "Schema\022-\n\027unmodified_table_schema\030\004 \001(\0132" + + "\014.TableSchema\"\254\001\n\033DeleteColumnFamilyStat" + + "eData\022#\n\tuser_info\030\001 \002(\0132\020.UserInformati" + + "on\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\022\031\n\021co" + + "lumnfamily_name\030\003 \002(\014\022-\n\027unmodified_tabl" + + "e_schema\030\004 \001(\0132\014.TableSchema*\330\001\n\020CreateT" + + "ableState\022\036\n\032CREATE_TABLE_PRE_OPERATION\020" + + "\001\022 \n\034CREATE_TABLE_WRITE_FS_LAYOUT\020\002\022\034\n\030C" + + "REATE_TABLE_ADD_TO_META\020\003\022\037\n\033CREATE_TABL", + "E_ASSIGN_REGIONS\020\004\022\"\n\036CREATE_TABLE_UPDAT" + + "E_DESC_CACHE\020\005\022\037\n\033CREATE_TABLE_POST_OPER" + + "ATION\020\006*\207\002\n\020ModifyTableState\022\030\n\024MODIFY_T" + + "ABLE_PREPARE\020\001\022\036\n\032MODIFY_TABLE_PRE_OPERA" + + "TION\020\002\022(\n$MODIFY_TABLE_UPDATE_TABLE_DESC" + + "RIPTOR\020\003\022&\n\"MODIFY_TABLE_REMOVE_REPLICA_" + + "COLUMN\020\004\022!\n\035MODIFY_TABLE_DELETE_FS_LAYOU" + + "T\020\005\022\037\n\033MODIFY_TABLE_POST_OPERATION\020\006\022#\n\037" + + "MODIFY_TABLE_REOPEN_ALL_REGIONS\020\007*\337\001\n\020De" + + "leteTableState\022\036\n\032DELETE_TABLE_PRE_OPERA", + "TION\020\001\022!\n\035DELETE_TABLE_REMOVE_FROM_META\020" + + "\002\022 \n\034DELETE_TABLE_CLEAR_FS_LAYOUT\020\003\022\"\n\036D" + + "ELETE_TABLE_UPDATE_DESC_CACHE\020\004\022!\n\035DELET" + + "E_TABLE_UNASSIGN_REGIONS\020\005\022\037\n\033DELETE_TAB" + + "LE_POST_OPERATION\020\006*\331\001\n\024AddColumnFamilyS" + + "tate\022\035\n\031ADD_COLUMN_FAMILY_PREPARE\020\001\022#\n\037A" + + "DD_COLUMN_FAMILY_PRE_OPERATION\020\002\022-\n)ADD_" + + "COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR\020\003\022" + + "$\n ADD_COLUMN_FAMILY_POST_OPERATION\020\004\022(\n" + + "$ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS\020\005*", + "\353\001\n\027ModifyColumnFamilyState\022 \n\034MODIFY_CO" + + "LUMN_FAMILY_PREPARE\020\001\022&\n\"MODIFY_COLUMN_F" + + "AMILY_PRE_OPERATION\020\002\0220\n,MODIFY_COLUMN_F" + + "AMILY_UPDATE_TABLE_DESCRIPTOR\020\003\022\'\n#MODIF" + + "Y_COLUMN_FAMILY_POST_OPERATION\020\004\022+\n\'MODI" + + "FY_COLUMN_FAMILY_REOPEN_ALL_REGIONS\020\005*\226\002" + + "\n\027DeleteColumnFamilyState\022 \n\034DELETE_COLU" + + "MN_FAMILY_PREPARE\020\001\022&\n\"DELETE_COLUMN_FAM" + + "ILY_PRE_OPERATION\020\002\0220\n,DELETE_COLUMN_FAM" + + "ILY_UPDATE_TABLE_DESCRIPTOR\020\003\022)\n%DELETE_", + "COLUMN_FAMILY_DELETE_FS_LAYOUT\020\004\022\'\n#DELE" + + "TE_COLUMN_FAMILY_POST_OPERATION\020\005\022+\n\'DEL" + + 
"ETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS\020\006BK" + + "\n*org.apache.hadoop.hbase.protobuf.gener" + + "atedB\025MasterProcedureProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -3857,6 +7748,24 @@ public final class MasterProcedureProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_DeleteTableStateData_descriptor, new java.lang.String[] { "UserInfo", "TableName", "RegionInfo", }); + internal_static_AddColumnFamilyStateData_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_AddColumnFamilyStateData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_AddColumnFamilyStateData_descriptor, + new java.lang.String[] { "UserInfo", "TableName", "ColumnfamilySchema", "UnmodifiedTableSchema", }); + internal_static_ModifyColumnFamilyStateData_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_ModifyColumnFamilyStateData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ModifyColumnFamilyStateData_descriptor, + new java.lang.String[] { "UserInfo", "TableName", "ColumnfamilySchema", "UnmodifiedTableSchema", }); + internal_static_DeleteColumnFamilyStateData_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_DeleteColumnFamilyStateData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_DeleteColumnFamilyStateData_descriptor, + new java.lang.String[] { "UserInfo", "TableName", "ColumnfamilyName", "UnmodifiedTableSchema", }); return null; } }; diff --git a/hbase-protocol/src/main/protobuf/MasterProcedure.proto b/hbase-protocol/src/main/protobuf/MasterProcedure.proto index 97d1af66637..a07516df450 100644 --- a/hbase-protocol/src/main/protobuf/MasterProcedure.proto +++ b/hbase-protocol/src/main/protobuf/MasterProcedure.proto @@ -89,3 +89,49 @@ message DeleteTableStateData { required TableName table_name = 2; repeated RegionInfo region_info = 3; } + +enum AddColumnFamilyState { + ADD_COLUMN_FAMILY_PREPARE = 1; + ADD_COLUMN_FAMILY_PRE_OPERATION = 2; + ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3; + ADD_COLUMN_FAMILY_POST_OPERATION = 4; + ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 5; +} + +message AddColumnFamilyStateData { + required UserInformation user_info = 1; + required TableName table_name = 2; + required ColumnFamilySchema columnfamily_schema = 3; + optional TableSchema unmodified_table_schema = 4; +} + +enum ModifyColumnFamilyState { + MODIFY_COLUMN_FAMILY_PREPARE = 1; + MODIFY_COLUMN_FAMILY_PRE_OPERATION = 2; + MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3; + MODIFY_COLUMN_FAMILY_POST_OPERATION = 4; + MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 5; +} + +message ModifyColumnFamilyStateData { + required UserInformation user_info = 1; + required TableName table_name = 2; + required ColumnFamilySchema columnfamily_schema = 3; + optional TableSchema unmodified_table_schema = 4; +} + +enum DeleteColumnFamilyState { + DELETE_COLUMN_FAMILY_PREPARE = 1; + DELETE_COLUMN_FAMILY_PRE_OPERATION = 2; + DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3; + DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT = 4; + DELETE_COLUMN_FAMILY_POST_OPERATION = 5; + DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 6; +} + +message DeleteColumnFamilyStateData { + required UserInformation user_info = 1; + required TableName table_name = 2; + 
required bytes columnfamily_name = 3; + optional TableSchema unmodified_table_schema = 4; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index b691a5e0835..4bded76fc85 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -93,14 +93,14 @@ import org.apache.hadoop.hbase.master.cleaner.LogCleaner; import org.apache.hadoop.hbase.master.handler.DisableTableHandler; import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler; import org.apache.hadoop.hbase.master.handler.EnableTableHandler; -import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler; -import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler; -import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler; import org.apache.hadoop.hbase.master.handler.TruncateTableHandler; +import org.apache.hadoop.hbase.master.procedure.AddColumnFamilyProcedure; import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure; +import org.apache.hadoop.hbase.master.procedure.DeleteColumnFamilyProcedure; import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure; import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure; import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure; import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch; import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait; @@ -1649,8 +1649,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server { return; } } - //TODO: we should process this (and some others) in an executor - new TableAddFamilyHandler(tableName, columnDescriptor, this, this).prepare().process(); + // Execute the operation synchronously - wait for the operation to complete before continuing. + long procId = + this.procedureExecutor.submitProcedure(new AddColumnFamilyProcedure(procedureExecutor + .getEnvironment(), tableName, columnDescriptor)); + ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId); if (cpHost != null) { cpHost.postAddColumn(tableName, columnDescriptor); } @@ -1668,8 +1671,13 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } } LOG.info(getClientIdAuditPrefix() + " modify " + descriptor); - new TableModifyFamilyHandler(tableName, descriptor, this, this) - .prepare().process(); + + // Execute the operation synchronously - wait for the operation to complete before continuing. + long procId = + this.procedureExecutor.submitProcedure(new ModifyColumnFamilyProcedure(procedureExecutor + .getEnvironment(), tableName, descriptor)); + ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId); + if (cpHost != null) { cpHost.postModifyColumn(tableName, descriptor); } @@ -1685,7 +1693,13 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } } LOG.info(getClientIdAuditPrefix() + " delete " + Bytes.toString(columnName)); - new TableDeleteFamilyHandler(tableName, columnName, this, this).prepare().process(); + + // Execute the operation synchronously - wait for the operation to complete before continuing. 
+ long procId = + this.procedureExecutor.submitProcedure(new DeleteColumnFamilyProcedure(procedureExecutor + .getEnvironment(), tableName, columnName)); + ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId); + if (cpHost != null) { cpHost.postDeleteColumn(tableName, columnName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java index 285d36d5488..e9a163836f1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java @@ -21,11 +21,11 @@ package org.apache.hadoop.hbase.master.handler; import java.io.IOException; import java.util.List; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; @@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.util.Bytes; /** - * Handles adding a new family to an existing table. + * Handles Deleting a column family from an existing table. */ @InterfaceAudience.Private public class TableDeleteFamilyHandler extends TableEventHandler { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java new file mode 100644 index 00000000000..6c80dd21212 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java @@ -0,0 +1,407 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.security.PrivilegedExceptionAction; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.InvalidFamilyOperationException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.executor.EventType; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyState; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.security.UserGroupInformation; + +/** + * The procedure to add a column family to an existing table. + */ +@InterfaceAudience.Private +public class AddColumnFamilyProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(AddColumnFamilyProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + + private TableName tableName; + private HTableDescriptor unmodifiedHTableDescriptor; + private HColumnDescriptor cfDescriptor; + private UserGroupInformation user; + + private List regionInfoList; + private Boolean traceEnabled; + + public AddColumnFamilyProcedure() { + this.unmodifiedHTableDescriptor = null; + this.regionInfoList = null; + this.traceEnabled = null; + } + + public AddColumnFamilyProcedure( + final MasterProcedureEnv env, + final TableName tableName, + final HColumnDescriptor cfDescriptor) throws IOException { + this.tableName = tableName; + this.cfDescriptor = cfDescriptor; + this.user = env.getRequestUser().getUGI(); + this.unmodifiedHTableDescriptor = null; + this.regionInfoList = null; + this.traceEnabled = null; + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, final AddColumnFamilyState state) { + if (isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + + try { + switch (state) { + case ADD_COLUMN_FAMILY_PREPARE: + prepareAdd(env); + setNextState(AddColumnFamilyState.ADD_COLUMN_FAMILY_PRE_OPERATION); + break; + case ADD_COLUMN_FAMILY_PRE_OPERATION: + preAdd(env, state); + setNextState(AddColumnFamilyState.ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR); + break; + case ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR: + updateTableDescriptor(env); + setNextState(AddColumnFamilyState.ADD_COLUMN_FAMILY_POST_OPERATION); + break; + case ADD_COLUMN_FAMILY_POST_OPERATION: + postAdd(env, state); + setNextState(AddColumnFamilyState.ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS); + break; + case ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS: + reOpenAllRegionsIfTableIsOnline(env); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + } catch (InterruptedException|IOException e) { + LOG.warn("Error trying to add the column family" + getColumnFamilyName() + " to the table " + + tableName + " (in state=" + state + ")", e); + + 
setFailure("master-add-columnfamily", e); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final AddColumnFamilyState state) + throws IOException { + if (isTraceEnabled()) { + LOG.trace(this + " rollback state=" + state); + } + try { + switch (state) { + case ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS: + break; // Nothing to undo. + case ADD_COLUMN_FAMILY_POST_OPERATION: + // TODO-MAYBE: call the coprocessor event to undo? + break; + case ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR: + restoreTableDescriptor(env); + break; + case ADD_COLUMN_FAMILY_PRE_OPERATION: + // TODO-MAYBE: call the coprocessor event to undo? + break; + case ADD_COLUMN_FAMILY_PREPARE: + break; // nothing to do + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + } catch (IOException e) { + // This will be retried. Unless there is a bug in the code, + // this should be just a "temporary error" (e.g. network down) + LOG.warn("Failed rollback attempt step " + state + " for adding the column family" + + getColumnFamilyName() + " to the table " + tableName, e); + throw e; + } + } + + @Override + protected AddColumnFamilyState getState(final int stateId) { + return AddColumnFamilyState.valueOf(stateId); + } + + @Override + protected int getStateId(final AddColumnFamilyState state) { + return state.getNumber(); + } + + @Override + protected AddColumnFamilyState getInitialState() { + return AddColumnFamilyState.ADD_COLUMN_FAMILY_PREPARE; + } + + @Override + protected void setNextState(AddColumnFamilyState state) { + if (aborted.get()) { + setAbortFailure("add-columnfamily", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (!env.isInitialized()) return false; + return env.getProcedureQueue().tryAcquireTableWrite( + tableName, + EventType.C_M_ADD_FAMILY.toString()); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableWrite(tableName); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + MasterProcedureProtos.AddColumnFamilyStateData.Builder addCFMsg = + MasterProcedureProtos.AddColumnFamilyStateData.newBuilder() + .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user)) + .setTableName(ProtobufUtil.toProtoTableName(tableName)) + .setColumnfamilySchema(cfDescriptor.convert()); + if (unmodifiedHTableDescriptor != null) { + addCFMsg.setUnmodifiedTableSchema(unmodifiedHTableDescriptor.convert()); + } + + addCFMsg.build().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + MasterProcedureProtos.AddColumnFamilyStateData addCFMsg = + MasterProcedureProtos.AddColumnFamilyStateData.parseDelimitedFrom(stream); + user = MasterProcedureUtil.toUserInfo(addCFMsg.getUserInfo()); + tableName = ProtobufUtil.toTableName(addCFMsg.getTableName()); + cfDescriptor = HColumnDescriptor.convert(addCFMsg.getColumnfamilySchema()); + if (addCFMsg.hasUnmodifiedTableSchema()) { + unmodifiedHTableDescriptor = HTableDescriptor.convert(addCFMsg.getUnmodifiedTableSchema()); + } + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + 
sb.append(" (table="); + sb.append(tableName); + sb.append(", columnfamily="); + if (cfDescriptor != null) { + sb.append(getColumnFamilyName()); + } else { + sb.append("Unknown"); + } + sb.append(") user="); + sb.append(user); + } + + @Override + public TableName getTableName() { + return tableName; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.EDIT; + } + + /** + * Action before any real action of adding column family. + * @param env MasterProcedureEnv + * @throws IOException + */ + private void prepareAdd(final MasterProcedureEnv env) throws IOException { + // Checks whether the table is allowed to be modified. + MasterDDLOperationHelper.checkTableModifiable(env, tableName); + + // In order to update the descriptor, we need to retrieve the old descriptor for comparison. + unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName); + if (unmodifiedHTableDescriptor == null) { + throw new IOException("HTableDescriptor missing for " + tableName); + } + if (unmodifiedHTableDescriptor.hasFamily(cfDescriptor.getName())) { + throw new InvalidFamilyOperationException("Column family '" + getColumnFamilyName() + + "' in table '" + tableName + "' already exists so cannot be added"); + } + } + + /** + * Action before adding column family. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void preAdd(final MasterProcedureEnv env, final AddColumnFamilyState state) + throws IOException, InterruptedException { + runCoprocessorAction(env, state); + } + + /** + * Add the column family to the file system + */ + private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException { + // Update table descriptor + LOG.info("AddColumn. Table = " + tableName + " HCD = " + cfDescriptor.toString()); + + HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName); + + if (htd.hasFamily(cfDescriptor.getName())) { + // It is possible to reach this situation, as we could already add the column family + // to table descriptor, but the master failover happens before we complete this state. + // We should be able to handle running this function multiple times without causing problem. + return; + } + + htd.addFamily(cfDescriptor); + env.getMasterServices().getTableDescriptors().add(htd); + } + + /** + * Restore the table descriptor back to pre-add + * @param env MasterProcedureEnv + * @throws IOException + **/ + private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException { + HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName); + if (htd.hasFamily(cfDescriptor.getName())) { + // Remove the column family from file system and update the table descriptor to + // the before-add-column-family-state + MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(env, tableName, + getRegionInfoList(env), cfDescriptor.getName()); + + env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor); + + // Make sure regions are opened after table descriptor is updated. + reOpenAllRegionsIfTableIsOnline(env); + } + } + + /** + * Action after adding column family. 
+ * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void postAdd(final MasterProcedureEnv env, final AddColumnFamilyState state) + throws IOException, InterruptedException { + runCoprocessorAction(env, state); + } + + /** + * Last action from the procedure - executed when online schema change is supported. + * @param env MasterProcedureEnv + * @throws IOException + */ + private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException { + // This operation only run when the table is enabled. + if (!env.getMasterServices().getAssignmentManager().getTableStateManager() + .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) { + return; + } + + if (MasterDDLOperationHelper.reOpenAllRegions(env, getTableName(), getRegionInfoList(env))) { + LOG.info("Completed add column family operation on table " + getTableName()); + } else { + LOG.warn("Error on reopening the regions on table " + getTableName()); + } + } + + /** + * The procedure could be restarted from a different machine. If the variable is null, we need to + * retrieve it. + * @return traceEnabled + */ + private Boolean isTraceEnabled() { + if (traceEnabled == null) { + traceEnabled = LOG.isTraceEnabled(); + } + return traceEnabled; + } + + private String getColumnFamilyName() { + return cfDescriptor.getNameAsString(); + } + + /** + * Coprocessor Action. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void runCoprocessorAction(final MasterProcedureEnv env, final AddColumnFamilyState state) + throws IOException, InterruptedException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + user.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + switch (state) { + case ADD_COLUMN_FAMILY_PRE_OPERATION: + cpHost.preAddColumnHandler(tableName, cfDescriptor); + break; + case ADD_COLUMN_FAMILY_POST_OPERATION: + cpHost.postAddColumnHandler(tableName, cfDescriptor); + break; + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + return null; + } + }); + } + } + + private List getRegionInfoList(final MasterProcedureEnv env) throws IOException { + if (regionInfoList == null) { + regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, getTableName()); + } + return regionInfoList; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java new file mode 100644 index 00000000000..316f22500ee --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java @@ -0,0 +1,439 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.security.PrivilegedExceptionAction; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.InvalidFamilyOperationException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.executor.EventType; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyState; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.util.ByteStringer; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.security.UserGroupInformation; + +/** + * The procedure to delete a column family from an existing table. + */ +@InterfaceAudience.Private +public class DeleteColumnFamilyProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(DeleteColumnFamilyProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + + private HTableDescriptor unmodifiedHTableDescriptor; + private TableName tableName; + private byte [] familyName; + private UserGroupInformation user; + + private List regionInfoList; + private Boolean traceEnabled; + + public DeleteColumnFamilyProcedure() { + this.unmodifiedHTableDescriptor = null; + this.regionInfoList = null; + this.traceEnabled = null; + } + + public DeleteColumnFamilyProcedure( + final MasterProcedureEnv env, + final TableName tableName, + final byte[] familyName) throws IOException { + this.tableName = tableName; + this.familyName = familyName; + this.user = env.getRequestUser().getUGI(); + this.unmodifiedHTableDescriptor = null; + this.regionInfoList = null; + this.traceEnabled = null; + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, DeleteColumnFamilyState state) { + if (isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + + try { + switch (state) { + case DELETE_COLUMN_FAMILY_PREPARE: + prepareDelete(env); + setNextState(DeleteColumnFamilyState.DELETE_COLUMN_FAMILY_PRE_OPERATION); + break; + case DELETE_COLUMN_FAMILY_PRE_OPERATION: + preDelete(env, state); + setNextState(DeleteColumnFamilyState.DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR); + break; + case DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR: + updateTableDescriptor(env); + setNextState(DeleteColumnFamilyState.DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT); + break; + case DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT: + deleteFromFs(env); + 
setNextState(DeleteColumnFamilyState.DELETE_COLUMN_FAMILY_POST_OPERATION); + break; + case DELETE_COLUMN_FAMILY_POST_OPERATION: + postDelete(env, state); + setNextState(DeleteColumnFamilyState.DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS); + break; + case DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS: + reOpenAllRegionsIfTableIsOnline(env); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + } catch (InterruptedException|IOException e) { + if (!isRollbackSupported(state)) { + // We reach a state that cannot be rolled back. We just need to keep retrying. + LOG.warn("Error trying to delete the column family " + getColumnFamilyName() + + " from table " + tableName + "(in state=" + state + ")", e); + } else { + LOG.error("Error trying to delete the column family " + getColumnFamilyName() + + " from table " + tableName + "(in state=" + state + ")", e); + setFailure("master-delete-column-family", e); + } + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final DeleteColumnFamilyState state) + throws IOException { + if (isTraceEnabled()) { + LOG.trace(this + " rollback state=" + state); + } + try { + switch (state) { + case DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS: + break; // Nothing to undo. + case DELETE_COLUMN_FAMILY_POST_OPERATION: + // TODO-MAYBE: call the coprocessor event to undo? + break; + case DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT: + // Once we reach this state - we could NOT roll back - as it is tricky to undelete + // the deleted files. We are not supposed to reach here; throw an exception so that we know + // there is a code bug to investigate. + throw new UnsupportedOperationException(this + " rollback of state=" + state + + " is unsupported."); + case DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR: + restoreTableDescriptor(env); + break; + case DELETE_COLUMN_FAMILY_PRE_OPERATION: + // TODO-MAYBE: call the coprocessor event to undo? + break; + case DELETE_COLUMN_FAMILY_PREPARE: + break; // nothing to do + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + } catch (IOException e) { + // This will be retried. Unless there is a bug in the code, + // this should be just a "temporary error" (e.g. 
network down) + LOG.warn("Failed rollback attempt step " + state + " for deleting the column family" + + getColumnFamilyName() + " to the table " + tableName, e); + throw e; + } + } + + @Override + protected DeleteColumnFamilyState getState(final int stateId) { + return DeleteColumnFamilyState.valueOf(stateId); + } + + @Override + protected int getStateId(final DeleteColumnFamilyState state) { + return state.getNumber(); + } + + @Override + protected DeleteColumnFamilyState getInitialState() { + return DeleteColumnFamilyState.DELETE_COLUMN_FAMILY_PREPARE; + } + + @Override + protected void setNextState(DeleteColumnFamilyState state) { + if (aborted.get() && isRollbackSupported(state)) { + setAbortFailure("delete-columnfamily", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (!env.isInitialized()) return false; + return env.getProcedureQueue().tryAcquireTableWrite( + tableName, + EventType.C_M_DELETE_FAMILY.toString()); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableWrite(tableName); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + MasterProcedureProtos.DeleteColumnFamilyStateData.Builder deleteCFMsg = + MasterProcedureProtos.DeleteColumnFamilyStateData.newBuilder() + .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user)) + .setTableName(ProtobufUtil.toProtoTableName(tableName)) + .setColumnfamilyName(ByteStringer.wrap(familyName)); + if (unmodifiedHTableDescriptor != null) { + deleteCFMsg.setUnmodifiedTableSchema(unmodifiedHTableDescriptor.convert()); + } + + deleteCFMsg.build().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + MasterProcedureProtos.DeleteColumnFamilyStateData deleteCFMsg = + MasterProcedureProtos.DeleteColumnFamilyStateData.parseDelimitedFrom(stream); + user = MasterProcedureUtil.toUserInfo(deleteCFMsg.getUserInfo()); + tableName = ProtobufUtil.toTableName(deleteCFMsg.getTableName()); + familyName = deleteCFMsg.getColumnfamilyName().toByteArray(); + + if (deleteCFMsg.hasUnmodifiedTableSchema()) { + unmodifiedHTableDescriptor = HTableDescriptor.convert(deleteCFMsg.getUnmodifiedTableSchema()); + } + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (table="); + sb.append(tableName); + sb.append(", columnfamily="); + if (familyName != null) { + sb.append(getColumnFamilyName()); + } else { + sb.append("Unknown"); + } + sb.append(") user="); + sb.append(user); + } + + @Override + public TableName getTableName() { + return tableName; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.EDIT; + } + + /** + * Action before any real action of deleting column family. + * @param env MasterProcedureEnv + * @throws IOException + */ + private void prepareDelete(final MasterProcedureEnv env) throws IOException { + // Checks whether the table is allowed to be modified. + MasterDDLOperationHelper.checkTableModifiable(env, tableName); + + // In order to update the descriptor, we need to retrieve the old descriptor for comparison. 
+ unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName); + if (unmodifiedHTableDescriptor == null) { + throw new IOException("HTableDescriptor missing for " + tableName); + } + if (!unmodifiedHTableDescriptor.hasFamily(familyName)) { + throw new InvalidFamilyOperationException("Family '" + getColumnFamilyName() + + "' does not exist, so it cannot be deleted"); + } + } + + /** + * Action before deleting column family. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void preDelete(final MasterProcedureEnv env, final DeleteColumnFamilyState state) + throws IOException, InterruptedException { + runCoprocessorAction(env, state); + } + + /** + * Remove the column family from the file system and update the table descriptor + */ + private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException { + // Update table descriptor + LOG.info("DeleteColumn. Table = " + tableName + " family = " + getColumnFamilyName()); + + HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName); + + if (!htd.hasFamily(familyName)) { + // It is possible to reach this situation, as we could already delete the column family + // from table descriptor, but the master failover happens before we complete this state. + // We should be able to handle running this function multiple times without causing problem. + return; + } + + htd.removeFamily(familyName); + env.getMasterServices().getTableDescriptors().add(htd); + } + + /** + * Restore back to the old descriptor + * @param env MasterProcedureEnv + * @throws IOException + **/ + private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException { + env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor); + + // Make sure regions are opened after table descriptor is updated. + reOpenAllRegionsIfTableIsOnline(env); + } + + /** + * Remove the column family from the file system + **/ + private void deleteFromFs(final MasterProcedureEnv env) throws IOException { + MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(env, tableName, + getRegionInfoList(env), familyName); + } + + /** + * Action after deleting column family. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void postDelete(final MasterProcedureEnv env, final DeleteColumnFamilyState state) + throws IOException, InterruptedException { + runCoprocessorAction(env, state); + } + + /** + * Last action from the procedure - executed when online schema change is supported. + * @param env MasterProcedureEnv + * @throws IOException + */ + private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException { + // This operation only run when the table is enabled. + if (!env.getMasterServices().getAssignmentManager().getTableStateManager() + .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) { + return; + } + + if (MasterDDLOperationHelper.reOpenAllRegions(env, getTableName(), getRegionInfoList(env))) { + LOG.info("Completed delete column family operation on table " + getTableName()); + } else { + LOG.warn("Error on reopening the regions on table " + getTableName()); + } + } + + /** + * The procedure could be restarted from a different machine. If the variable is null, we need to + * retrieve it. 
+ * @return traceEnabled + */ + private Boolean isTraceEnabled() { + if (traceEnabled == null) { + traceEnabled = LOG.isTraceEnabled(); + } + return traceEnabled; + } + + private String getColumnFamilyName() { + return Bytes.toString(familyName); + } + + /** + * Coprocessor Action. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void runCoprocessorAction(final MasterProcedureEnv env, + final DeleteColumnFamilyState state) throws IOException, InterruptedException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + user.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + switch (state) { + case DELETE_COLUMN_FAMILY_PRE_OPERATION: + cpHost.preDeleteColumnHandler(tableName, familyName); + break; + case DELETE_COLUMN_FAMILY_POST_OPERATION: + cpHost.postDeleteColumnHandler(tableName, familyName); + break; + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + return null; + } + }); + } + } + + /* + * Check whether we are in the state that can be rollback + */ + private boolean isRollbackSupported(final DeleteColumnFamilyState state) { + switch (state) { + case DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS: + case DELETE_COLUMN_FAMILY_POST_OPERATION: + case DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT: + // It is not safe to rollback if we reach to these states. + return false; + default: + break; + } + return true; + } + + private List getRegionInfoList(final MasterProcedureEnv env) throws IOException { + if (regionInfoList == null) { + regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, getTableName()); + } + return regionInfoList; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java new file mode 100644 index 00000000000..3de5202e648 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java @@ -0,0 +1,382 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.security.PrivilegedExceptionAction; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.InvalidFamilyOperationException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.executor.EventType; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyState; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.security.UserGroupInformation; + +/** + * The procedure to modify a column family from an existing table. + */ +@InterfaceAudience.Private +public class ModifyColumnFamilyProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(ModifyColumnFamilyProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + + private TableName tableName; + private HTableDescriptor unmodifiedHTableDescriptor; + private HColumnDescriptor cfDescriptor; + private UserGroupInformation user; + + private Boolean traceEnabled; + + public ModifyColumnFamilyProcedure() { + this.unmodifiedHTableDescriptor = null; + this.traceEnabled = null; + } + + public ModifyColumnFamilyProcedure( + final MasterProcedureEnv env, + final TableName tableName, + final HColumnDescriptor cfDescriptor) throws IOException { + this.tableName = tableName; + this.cfDescriptor = cfDescriptor; + this.user = env.getRequestUser().getUGI(); + this.unmodifiedHTableDescriptor = null; + this.traceEnabled = null; + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, + final ModifyColumnFamilyState state) { + if (isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + + try { + switch (state) { + case MODIFY_COLUMN_FAMILY_PREPARE: + prepareModify(env); + setNextState(ModifyColumnFamilyState.MODIFY_COLUMN_FAMILY_PRE_OPERATION); + break; + case MODIFY_COLUMN_FAMILY_PRE_OPERATION: + preModify(env, state); + setNextState(ModifyColumnFamilyState.MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR); + break; + case MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR: + updateTableDescriptor(env); + setNextState(ModifyColumnFamilyState.MODIFY_COLUMN_FAMILY_POST_OPERATION); + break; + case MODIFY_COLUMN_FAMILY_POST_OPERATION: + postModify(env, state); + setNextState(ModifyColumnFamilyState.MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS); + break; + case MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS: + reOpenAllRegionsIfTableIsOnline(env); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + } catch (InterruptedException|IOException e) { + LOG.warn("Error trying to modify the column family " + getColumnFamilyName() + + " of the table " + tableName + "(in state=" + state + ")", e); + + 
setFailure("master-modify-columnfamily", e); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final ModifyColumnFamilyState state) + throws IOException { + if (isTraceEnabled()) { + LOG.trace(this + " rollback state=" + state); + } + try { + switch (state) { + case MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS: + break; // Nothing to undo. + case MODIFY_COLUMN_FAMILY_POST_OPERATION: + // TODO-MAYBE: call the coprocessor event to undo? + break; + case MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR: + restoreTableDescriptor(env); + break; + case MODIFY_COLUMN_FAMILY_PRE_OPERATION: + // TODO-MAYBE: call the coprocessor event to undo? + break; + case MODIFY_COLUMN_FAMILY_PREPARE: + break; // nothing to do + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + } catch (IOException e) { + // This will be retried. Unless there is a bug in the code, + // this should be just a "temporary error" (e.g. network down) + LOG.warn("Failed rollback attempt step " + state + " for adding the column family" + + getColumnFamilyName() + " to the table " + tableName, e); + throw e; + } + } + + @Override + protected ModifyColumnFamilyState getState(final int stateId) { + return ModifyColumnFamilyState.valueOf(stateId); + } + + @Override + protected int getStateId(final ModifyColumnFamilyState state) { + return state.getNumber(); + } + + @Override + protected ModifyColumnFamilyState getInitialState() { + return ModifyColumnFamilyState.MODIFY_COLUMN_FAMILY_PREPARE; + } + + @Override + protected void setNextState(ModifyColumnFamilyState state) { + if (aborted.get()) { + setAbortFailure("modify-columnfamily", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (!env.isInitialized()) return false; + return env.getProcedureQueue().tryAcquireTableWrite( + tableName, + EventType.C_M_MODIFY_FAMILY.toString()); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableWrite(tableName); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + MasterProcedureProtos.ModifyColumnFamilyStateData.Builder modifyCFMsg = + MasterProcedureProtos.ModifyColumnFamilyStateData.newBuilder() + .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user)) + .setTableName(ProtobufUtil.toProtoTableName(tableName)) + .setColumnfamilySchema(cfDescriptor.convert()); + if (unmodifiedHTableDescriptor != null) { + modifyCFMsg.setUnmodifiedTableSchema(unmodifiedHTableDescriptor.convert()); + } + + modifyCFMsg.build().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + MasterProcedureProtos.ModifyColumnFamilyStateData modifyCFMsg = + MasterProcedureProtos.ModifyColumnFamilyStateData.parseDelimitedFrom(stream); + user = MasterProcedureUtil.toUserInfo(modifyCFMsg.getUserInfo()); + tableName = ProtobufUtil.toTableName(modifyCFMsg.getTableName()); + cfDescriptor = HColumnDescriptor.convert(modifyCFMsg.getColumnfamilySchema()); + if (modifyCFMsg.hasUnmodifiedTableSchema()) { + unmodifiedHTableDescriptor = HTableDescriptor.convert(modifyCFMsg.getUnmodifiedTableSchema()); + } + } + + @Override + public void 
toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (table="); + sb.append(tableName); + sb.append(", columnfamily="); + if (cfDescriptor != null) { + sb.append(getColumnFamilyName()); + } else { + sb.append("Unknown"); + } + sb.append(") user="); + sb.append(user); + } + + @Override + public TableName getTableName() { + return tableName; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.EDIT; + } + + /** + * Action before any real action of modifying column family. + * @param env MasterProcedureEnv + * @throws IOException + */ + private void prepareModify(final MasterProcedureEnv env) throws IOException { + // Checks whether the table is allowed to be modified. + MasterDDLOperationHelper.checkTableModifiable(env, tableName); + + unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName); + if (unmodifiedHTableDescriptor == null) { + throw new IOException("HTableDescriptor missing for " + tableName); + } + if (!unmodifiedHTableDescriptor.hasFamily(cfDescriptor.getName())) { + throw new InvalidFamilyOperationException("Family '" + getColumnFamilyName() + + "' does not exist, so it cannot be modified"); + } + } + + /** + * Action before modifying column family. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void preModify(final MasterProcedureEnv env, final ModifyColumnFamilyState state) + throws IOException, InterruptedException { + runCoprocessorAction(env, state); + } + + /** + * Modify the column family from the file system + */ + private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException { + // Update table descriptor + LOG.info("ModifyColumnFamily. Table = " + tableName + " HCD = " + cfDescriptor.toString()); + + HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName); + htd.modifyFamily(cfDescriptor); + env.getMasterServices().getTableDescriptors().add(htd); + } + + /** + * Restore back to the old descriptor + * @param env MasterProcedureEnv + * @throws IOException + **/ + private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException { + env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor); + + // Make sure regions are opened after table descriptor is updated. + reOpenAllRegionsIfTableIsOnline(env); + } + + /** + * Action after modifying column family. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void postModify(final MasterProcedureEnv env, final ModifyColumnFamilyState state) + throws IOException, InterruptedException { + runCoprocessorAction(env, state); + } + + /** + * Last action from the procedure - executed when online schema change is supported. + * @param env MasterProcedureEnv + * @throws IOException + */ + private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException { + // This operation only run when the table is enabled. 
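+    // Reopening makes the region servers reload the updated table descriptor so the modified
+    // column family settings take effect immediately; a disabled table simply picks them up
+    // the next time it is enabled.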
+ if (!env.getMasterServices().getAssignmentManager().getTableStateManager() + .isTableState(getTableName(), ZooKeeperProtos.Table.State.ENABLED)) { + return; + } + + List regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, getTableName()); + if (MasterDDLOperationHelper.reOpenAllRegions(env, getTableName(), regionInfoList)) { + LOG.info("Completed add column family operation on table " + getTableName()); + } else { + LOG.warn("Error on reopening the regions on table " + getTableName()); + } + } + + /** + * The procedure could be restarted from a different machine. If the variable is null, we need to + * retrieve it. + * @return traceEnabled + */ + private Boolean isTraceEnabled() { + if (traceEnabled == null) { + traceEnabled = LOG.isTraceEnabled(); + } + return traceEnabled; + } + + private String getColumnFamilyName() { + return cfDescriptor.getNameAsString(); + } + + /** + * Coprocessor Action. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void runCoprocessorAction(final MasterProcedureEnv env, + final ModifyColumnFamilyState state) throws IOException, InterruptedException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + user.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + switch (state) { + case MODIFY_COLUMN_FAMILY_PRE_OPERATION: + cpHost.preModifyColumnHandler(tableName, cfDescriptor); + break; + case MODIFY_COLUMN_FAMILY_POST_OPERATION: + cpHost.postModifyColumnHandler(tableName, cfDescriptor); + break; + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + return null; + } + }); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java index e13063f6f8c..67806c5944a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java @@ -102,37 +102,6 @@ public class TestTableLockManager { TEST_UTIL.shutdownMiniCluster(); } - @Test(timeout = 600000) - public void testLockTimeoutException() throws Exception { - Configuration conf = TEST_UTIL.getConfiguration(); - conf.setInt(TableLockManager.TABLE_WRITE_LOCK_TIMEOUT_MS, 3000); - prepareMiniCluster(); - HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - master.getMasterCoprocessorHost().load(TestLockTimeoutExceptionMasterObserver.class, - 0, TEST_UTIL.getConfiguration()); - - ExecutorService executor = Executors.newSingleThreadExecutor(); - Future shouldFinish = executor.submit(new Callable() { - @Override - public Object call() throws Exception { - Admin admin = TEST_UTIL.getHBaseAdmin(); - admin.deleteColumn(TABLE_NAME, FAMILY); - return null; - } - }); - - deleteColumn.await(); - - try { - Admin admin = TEST_UTIL.getHBaseAdmin(); - admin.addColumn(TABLE_NAME, new HColumnDescriptor(NEW_FAMILY)); - fail("Was expecting TableLockTimeoutException"); - } catch (LockTimeoutException ex) { - //expected - } - shouldFinish.get(); - } - public static class TestLockTimeoutExceptionMasterObserver extends BaseMasterObserver { @Override public void preDeleteColumnHandler(ObserverContext ctx, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDeleteFamilyHandler.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDeleteFamilyHandler.java index 463fc54575f..3ab91ca08d5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDeleteFamilyHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDeleteFamilyHandler.java @@ -29,18 +29,24 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.InvalidFamilyOperationException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.WALSplitter; +import org.junit.After; import org.junit.AfterClass; +import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; @@ -57,15 +63,22 @@ public class TestTableDeleteFamilyHandler { /** * Start up a mini cluster and put a small table of empty regions into it. - * + * * @throws Exception */ @BeforeClass public static void beforeAllTests() throws Exception { - TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true); TEST_UTIL.startMiniCluster(2); + } + @AfterClass + public static void afterAllTests() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Before + public void setup() throws IOException, InterruptedException { // Create a table of three families. This will assign a region. 
TEST_UTIL.createTable(TABLENAME, FAMILIES); HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME); @@ -84,22 +97,17 @@ public class TestTableDeleteFamilyHandler { TEST_UTIL.flush(); t.close(); - } - @AfterClass - public static void afterAllTests() throws Exception { - TEST_UTIL.deleteTable(TABLENAME); - TEST_UTIL.shutdownMiniCluster(); - } - - @Before - public void setup() throws IOException, InterruptedException { TEST_UTIL.ensureSomeRegionServersAvailable(2); } + @After + public void cleanup() throws Exception { + TEST_UTIL.deleteTable(TABLENAME); + } + @Test public void deleteColumnFamilyWithMultipleRegions() throws Exception { - Admin admin = TEST_UTIL.getHBaseAdmin(); HTableDescriptor beforehtd = admin.getTableDescriptor(TABLENAME); @@ -112,7 +120,6 @@ public class TestTableDeleteFamilyHandler { assertEquals(3, beforehtd.getColumnFamilies().length); HColumnDescriptor[] families = beforehtd.getColumnFamilies(); for (int i = 0; i < families.length; i++) { - assertTrue(families[i].getNameAsString().equals("cf" + (i + 1))); } @@ -177,4 +184,95 @@ public class TestTableDeleteFamilyHandler { } } + @Test + public void deleteColumnFamilyTwice() throws Exception { + + Admin admin = TEST_UTIL.getHBaseAdmin(); + HTableDescriptor beforehtd = admin.getTableDescriptor(TABLENAME); + String cfToDelete = "cf1"; + + FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem(); + + // 1 - Check if table exists in descriptor + assertTrue(admin.isTableAvailable(TABLENAME)); + + // 2 - Check if all the target column family exist in descriptor + HColumnDescriptor[] families = beforehtd.getColumnFamilies(); + Boolean foundCF = false; + int i; + for (i = 0; i < families.length; i++) { + if (families[i].getNameAsString().equals(cfToDelete)) { + foundCF = true; + break; + } + } + assertTrue(foundCF); + + // 3 - Check if table exists in FS + Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), TABLENAME); + assertTrue(fs.exists(tableDir)); + + // 4 - Check if all the target column family exist in FS + FileStatus[] fileStatus = fs.listStatus(tableDir); + foundCF = false; + for (i = 0; i < fileStatus.length; i++) { + if (fileStatus[i].isDirectory() == true) { + FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), new PathFilter() { + @Override + public boolean accept(Path p) { + if (p.getName().contains(HConstants.RECOVERED_EDITS_DIR)) { + return false; + } + return true; + } + }); + for (int j = 0; j < cf.length; j++) { + if (cf[j].isDirectory() == true && cf[j].getPath().getName().equals(cfToDelete)) { + foundCF = true; + break; + } + } + } + if (foundCF) { + break; + } + } + assertTrue(foundCF); + + // TEST - Disable and delete the column family + if (admin.isTableEnabled(TABLENAME)) { + admin.disableTable(TABLENAME); + } + admin.deleteColumn(TABLENAME, Bytes.toBytes(cfToDelete)); + + // 5 - Check if the target column family is gone from the FS + fileStatus = fs.listStatus(tableDir); + for (i = 0; i < fileStatus.length; i++) { + if (fileStatus[i].isDirectory() == true) { + FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), new PathFilter() { + @Override + public boolean accept(Path p) { + if (WALSplitter.isSequenceIdFile(p)) { + return false; + } + return true; + } + }); + for (int j = 0; j < cf.length; j++) { + if (cf[j].isDirectory() == true) { + assertFalse(cf[j].getPath().getName().equals(cfToDelete)); + } + } + } + } + + try { + // Test: delete again + admin.deleteColumn(TABLENAME, Bytes.toBytes(cfToDelete)); + Assert.fail("Delete a non-exist column family should fail"); + 
} catch (InvalidFamilyOperationException e) { + // Expected. + } + } + } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java index d06110195ae..47f2b8ff6f6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.InvalidFamilyOperationException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.master.MasterFileSystem; @@ -37,6 +38,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; import org.junit.AfterClass; +import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; @@ -50,7 +52,7 @@ import org.junit.rules.TestName; */ @Category(LargeTests.class) public class TestTableDescriptorModification { - + @Rule public TestName name = new TestName(); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static TableName TABLE_NAME = null; @@ -72,7 +74,7 @@ public class TestTableDescriptorModification { TABLE_NAME = TableName.valueOf(name.getMethodName()); } - + @AfterClass public static void afterAllTests() throws Exception { TEST_UTIL.shutdownMiniCluster(); @@ -121,6 +123,95 @@ public class TestTableDescriptorModification { } } + @Test + public void testAddSameColumnFamilyTwice() throws IOException { + Admin admin = TEST_UTIL.getHBaseAdmin(); + // Create a table with one families + HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); + baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); + admin.createTable(baseHtd); + admin.disableTable(TABLE_NAME); + try { + // Verify the table descriptor + verifyTableDescriptor(TABLE_NAME, FAMILY_0); + + // Modify the table removing one family and verify the descriptor + admin.addColumn(TABLE_NAME, new HColumnDescriptor(FAMILY_1)); + verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1); + + try { + // Add same column family again - expect failure + admin.addColumn(TABLE_NAME, new HColumnDescriptor(FAMILY_1)); + Assert.fail("Delete a non-exist column family should fail"); + } catch (InvalidFamilyOperationException e) { + // Expected. 
+ } + + } finally { + admin.deleteTable(TABLE_NAME); + } + } + + @Test + public void testModifyColumnFamily() throws IOException { + Admin admin = TEST_UTIL.getHBaseAdmin(); + + HColumnDescriptor cfDescriptor = new HColumnDescriptor(FAMILY_0); + int blockSize = cfDescriptor.getBlocksize(); + // Create a table with one families + HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); + baseHtd.addFamily(cfDescriptor); + admin.createTable(baseHtd); + admin.disableTable(TABLE_NAME); + try { + // Verify the table descriptor + verifyTableDescriptor(TABLE_NAME, FAMILY_0); + + int newBlockSize = 2 * blockSize; + cfDescriptor.setBlocksize(newBlockSize); + + // Modify colymn family + admin.modifyColumn(TABLE_NAME, cfDescriptor); + + HTableDescriptor htd = admin.getTableDescriptor(TABLE_NAME); + HColumnDescriptor hcfd = htd.getFamily(FAMILY_0); + assertTrue(hcfd.getBlocksize() == newBlockSize); + } finally { + admin.deleteTable(TABLE_NAME); + } + } + + @Test + public void testModifyNonExistingColumnFamily() throws IOException { + Admin admin = TEST_UTIL.getHBaseAdmin(); + + HColumnDescriptor cfDescriptor = new HColumnDescriptor(FAMILY_1); + int blockSize = cfDescriptor.getBlocksize(); + // Create a table with one families + HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); + baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); + admin.createTable(baseHtd); + admin.disableTable(TABLE_NAME); + try { + // Verify the table descriptor + verifyTableDescriptor(TABLE_NAME, FAMILY_0); + + int newBlockSize = 2 * blockSize; + cfDescriptor.setBlocksize(newBlockSize); + + // Modify a column family that is not in the table. + try { + admin.modifyColumn(TABLE_NAME, cfDescriptor); + Assert.fail("Modify a non-exist column family should fail"); + } catch (InvalidFamilyOperationException e) { + // Expected. + } + + } finally { + admin.deleteTable(TABLE_NAME); + } + } + @Test public void testDeleteColumn() throws IOException { Admin admin = TEST_UTIL.getHBaseAdmin(); @@ -142,6 +233,35 @@ public class TestTableDescriptorModification { } } + @Test + public void testDeleteSameColumnFamilyTwice() throws IOException { + Admin admin = TEST_UTIL.getHBaseAdmin(); + // Create a table with two families + HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); + baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); + baseHtd.addFamily(new HColumnDescriptor(FAMILY_1)); + admin.createTable(baseHtd); + admin.disableTable(TABLE_NAME); + try { + // Verify the table descriptor + verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1); + + // Modify the table removing one family and verify the descriptor + admin.deleteColumn(TABLE_NAME, FAMILY_1); + verifyTableDescriptor(TABLE_NAME, FAMILY_0); + + try { + // Delete again - expect failure + admin.deleteColumn(TABLE_NAME, FAMILY_1); + Assert.fail("Delete a non-exist column family should fail"); + } catch (Exception e) { + // Expected. + } + } finally { + admin.deleteTable(TABLE_NAME); + } + } + private void verifyTableDescriptor(final TableName tableName, final byte[]... 
families) throws IOException { Admin admin = TEST_UTIL.getHBaseAdmin(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java index a29341edc62..4b1ab9429ff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java @@ -295,6 +295,39 @@ public class MasterProcedureTestingUtility { ProcedureTestingUtility.assertIsAbortException(procExec.getResult(procId)); } + public static void validateColumnFamilyAddition(final HMaster master, final TableName tableName, + final String family) throws IOException { + HTableDescriptor htd = master.getTableDescriptors().get(tableName); + assertTrue(htd != null); + assertTrue(htd.hasFamily(family.getBytes())); + } + + public static void validateColumnFamilyDeletion(final HMaster master, final TableName tableName, + final String family) throws IOException { + // verify htd + HTableDescriptor htd = master.getTableDescriptors().get(tableName); + assertTrue(htd != null); + assertFalse(htd.hasFamily(family.getBytes())); + + // verify fs + final FileSystem fs = master.getMasterFileSystem().getFileSystem(); + final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName); + for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) { + final Path familyDir = new Path(regionDir, family); + assertFalse(family + " family dir should not exist", fs.exists(familyDir)); + } + } + + public static void validateColumnFamilyModification(final HMaster master, + final TableName tableName, final String family, HColumnDescriptor columnDescriptor) + throws IOException { + HTableDescriptor htd = master.getTableDescriptors().get(tableName); + assertTrue(htd != null); + + HColumnDescriptor hcfd = htd.getFamily(family.getBytes()); + assertTrue(hcfd.equals(columnDescriptor)); + } + public static class InjectAbortOnLoadListener implements ProcedureExecutor.ProcedureExecutorListener { private final ProcedureExecutor procExec; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAddColumnFamilyProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAddColumnFamilyProcedure.java new file mode 100644 index 00000000000..05eb6027083 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAddColumnFamilyProcedure.java @@ -0,0 +1,245 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.procedure; + +import static org.junit.Assert.assertTrue; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.InvalidFamilyOperationException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureResult; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyState; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(MediumTests.class) +public class TestAddColumnFamilyProcedure { + private static final Log LOG = LogFactory.getLog(TestAddColumnFamilyProcedure.class); + + protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + + private static void setupConf(Configuration conf) { + conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); + } + + @BeforeClass + public static void setupCluster() throws Exception { + setupConf(UTIL.getConfiguration()); + UTIL.startMiniCluster(1); + } + + @AfterClass + public static void cleanupTest() throws Exception { + try { + UTIL.shutdownMiniCluster(); + } catch (Exception e) { + LOG.warn("failure shutting down cluster", e); + } + } + + @Before + public void setup() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + } + + @After + public void tearDown() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) { + LOG.info("Tear down, remove table=" + htd.getTableName()); + UTIL.deleteTable(htd.getTableName()); + } + } + + @Test(timeout = 60000) + public void testAddColumnFamily() throws Exception { + final TableName tableName = TableName.valueOf("testAddColumnFamily"); + final String cf1 = "cf1"; + final String cf2 = "cf2"; + final HColumnDescriptor columnDescriptor1 = new HColumnDescriptor(cf1); + final HColumnDescriptor columnDescriptor2 = new HColumnDescriptor(cf2); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f3"); + + // Test 1: Add a column family online + long procId1 = + procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, + columnDescriptor1)); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId1); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); + + MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(), + tableName, cf1); + + // Test 2: Add a column family offline + UTIL.getHBaseAdmin().disableTable(tableName); + long procId2 = + procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, + columnDescriptor2)); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId2); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId2); 
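+    // Even with the table disabled (no regions to reopen), the new family should be visible
+    // in the master's table descriptor. The Admin API path (e.g. admin.addColumn) presumably
+    // routes through this same procedure after this change, but the test drives it directly.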
+ MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(), + tableName, cf2); + } + + @Test(timeout=60000) + public void testAddSameColumnFamilyTwice() throws Exception { + final TableName tableName = TableName.valueOf("testAddColumnFamilyTwice"); + final String cf2 = "cf2"; + final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf2); + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1"); + + // add the column family + long procId1 = + procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, + columnDescriptor)); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId1); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); + MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(), + tableName, cf2); + + // add the column family that exists + long procId2 = + procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, + columnDescriptor)); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId2); + + // Second add should fail with InvalidFamilyOperationException + ProcedureResult result = procExec.getResult(procId2); + assertTrue(result.isFailed()); + LOG.debug("Add failed with exception: " + result.getException()); + assertTrue(result.getException().getCause() instanceof InvalidFamilyOperationException); + + // Do the same add the existing column family - this time offline + UTIL.getHBaseAdmin().disableTable(tableName); + long procId3 = + procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, + columnDescriptor)); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId3); + + // Second add should fail with InvalidFamilyOperationException + result = procExec.getResult(procId3); + assertTrue(result.isFailed()); + LOG.debug("Add failed with exception: " + result.getException()); + assertTrue(result.getException().getCause() instanceof InvalidFamilyOperationException); + } + + @Test(timeout = 60000) + public void testRecoveryAndDoubleExecutionOffline() throws Exception { + final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOffline"); + final String cf4 = "cf4"; + final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf4); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + // create the table + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", "f3"); + UTIL.getHBaseAdmin().disableTable(tableName); + + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the AddColumnFamily procedure && kill the executor + long procId = + procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, + columnDescriptor)); + + // Restart the executor and execute the step twice + int numberOfSteps = AddColumnFamilyState.values().length; + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps, + AddColumnFamilyState.values()); + + MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(), + tableName, cf4); + } + + @Test(timeout = 60000) + public void testRecoveryAndDoubleExecutionOnline() throws Exception { + final TableName tableName = 
TableName.valueOf("testRecoveryAndDoubleExecutionOnline"); + final String cf5 = "cf5"; + final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf5); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + // create the table + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", "f3"); + + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the AddColumnFamily procedure && kill the executor + long procId = + procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, + columnDescriptor)); + + // Restart the executor and execute the step twice + int numberOfSteps = AddColumnFamilyState.values().length; + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps, + AddColumnFamilyState.values()); + + MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(), + tableName, cf5); + } + + @Test(timeout = 60000) + public void testRollbackAndDoubleExecution() throws Exception { + final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution"); + final String cf6 = "cf6"; + final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf6); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + // create the table + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2"); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the AddColumnFamily procedure && kill the executor + long procId = + procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, + columnDescriptor)); + + int numberOfSteps = AddColumnFamilyState.values().length - 2; // failing in the middle of proc + MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps, + AddColumnFamilyState.values()); + + MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(), + tableName, cf6); + } + + private ProcedureExecutor getMasterProcedureExecutor() { + return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java new file mode 100644 index 00000000000..5aec002a0f8 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java @@ -0,0 +1,301 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.procedure; + +import static org.junit.Assert.assertTrue; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.InvalidFamilyOperationException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureResult; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyState; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(MediumTests.class) +public class TestDeleteColumnFamilyProcedure { + private static final Log LOG = LogFactory.getLog(TestDeleteColumnFamilyProcedure.class); + + protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + + private static void setupConf(Configuration conf) { + conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); + } + + @BeforeClass + public static void setupCluster() throws Exception { + setupConf(UTIL.getConfiguration()); + UTIL.startMiniCluster(1); + } + + @AfterClass + public static void cleanupTest() throws Exception { + try { + UTIL.shutdownMiniCluster(); + } catch (Exception e) { + LOG.warn("failure shutting down cluster", e); + } + } + + @Before + public void setup() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + } + + @After + public void tearDown() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) { + LOG.info("Tear down, remove table=" + htd.getTableName()); + UTIL.deleteTable(htd.getTableName()); + } + } + + @Test(timeout = 60000) + public void testDeleteColumnFamily() throws Exception { + final TableName tableName = TableName.valueOf("testDeleteColumnFamily"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + final String cf1 = "cf1"; + final String cf2 = "cf2"; + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, cf1, cf2, "f3"); + + // Test 1: delete the column family that exists online + long procId1 = + procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(), + tableName, cf1.getBytes())); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId1); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); + + MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(), + tableName, cf1); + + // Test 2: delete the column family that exists offline + UTIL.getHBaseAdmin().disableTable(tableName); + long procId2 = + procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(), + tableName, cf2.getBytes())); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId2); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId2); + } + + @Test(timeout=60000) + public void testDeleteColumnFamilyTwice() throws Exception { + 
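+    // The first delete should succeed; repeating it, first online and then with the table
+    // disabled, should fail with InvalidFamilyOperationException since the family is gone.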
final TableName tableName = TableName.valueOf("testDeleteColumnFamilyTwice"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + final String cf2 = "cf2"; + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", cf2); + + // delete the column family that exists + long procId1 = + procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(), + tableName, cf2.getBytes())); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId1); + // First delete should succeed + ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); + + MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(), + tableName, cf2); + + // delete the column family that does not exist + long procId2 = + procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(), + tableName, cf2.getBytes())); + + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId2); + + // Second delete should fail with InvalidFamilyOperationException + ProcedureResult result = procExec.getResult(procId2); + assertTrue(result.isFailed()); + LOG.debug("Delete online failed with exception: " + result.getException()); + assertTrue(result.getException().getCause() instanceof InvalidFamilyOperationException); + + // Try again, this time with table disabled. + UTIL.getHBaseAdmin().disableTable(tableName); + long procId3 = + procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(), + tableName, cf2.getBytes())); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId3); + // Expect fail with InvalidFamilyOperationException + result = procExec.getResult(procId2); + assertTrue(result.isFailed()); + LOG.debug("Delete offline failed with exception: " + result.getException()); + assertTrue(result.getException().getCause() instanceof InvalidFamilyOperationException); + } + + @Test(timeout=60000) + public void testDeleteNonExistingColumnFamily() throws Exception { + final TableName tableName = TableName.valueOf("testDeleteNonExistingColumnFamily"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + final String cf3 = "cf3"; + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2"); + + // delete the column family that does not exist + long procId1 = + procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(), + tableName, cf3.getBytes())); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId1); + + ProcedureResult result = procExec.getResult(procId1); + assertTrue(result.isFailed()); + LOG.debug("Delete failed with exception: " + result.getException()); + assertTrue(result.getException().getCause() instanceof InvalidFamilyOperationException); + } + + @Test(timeout=60000) + public void testRecoveryAndDoubleExecutionOffline() throws Exception { + final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOffline"); + final String cf4 = "cf4"; + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + // create the table + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", "f3", cf4); + UTIL.getHBaseAdmin().disableTable(tableName); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Delete procedure && kill the executor + long procId = + procExec.submitProcedure(new 
DeleteColumnFamilyProcedure(procExec.getEnvironment(), + tableName, cf4.getBytes())); + + // Restart the executor and execute the step twice + int numberOfSteps = DeleteColumnFamilyState.values().length; + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps, + DeleteColumnFamilyState.values()); + + MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(), + tableName, cf4); + } + + @Test(timeout = 60000) + public void testRecoveryAndDoubleExecutionOnline() throws Exception { + final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOnline"); + final String cf5 = "cf5"; + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + // create the table + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", "f3", cf5); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Delete procedure && kill the executor + long procId = + procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(), + tableName, cf5.getBytes())); + + // Restart the executor and execute the step twice + int numberOfSteps = DeleteColumnFamilyState.values().length; + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps, + DeleteColumnFamilyState.values()); + + MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(), + tableName, cf5); + } + + @Test(timeout = 60000) + public void testRollbackAndDoubleExecution() throws Exception { + final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution"); + final String cf5 = "cf5"; + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + // create the table + HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + procExec, tableName, null, "f1", "f2", "f3", cf5); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Delete procedure && kill the executor + long procId = procExec.submitProcedure( + new DeleteColumnFamilyProcedure(procExec.getEnvironment(), tableName, cf5.getBytes())); + + // Failing before DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT we should trigger the rollback + // NOTE: the 1 (number before DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT step) is hardcoded, + // so you have to look at this test at least once when you add a new step. 
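+    // DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT is the point of no return: once the family's files
+    // are removed from the filesystem they cannot be restored, so rollback is only supported
+    // in the states before it.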
+ int numberOfSteps = 1; + MasterProcedureTestingUtility.testRollbackAndDoubleExecution( + procExec, + procId, + numberOfSteps, + DeleteColumnFamilyState.values()); + + MasterProcedureTestingUtility.validateTableCreation( + UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2", "f3", cf5); + } + + @Test(timeout = 60000) + public void testRollbackAndDoubleExecutionAfterPONR() throws Exception { + final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecutionAfterPONR"); + final String cf5 = "cf5"; + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + // create the table + HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + procExec, tableName, null, "f1", "f2", "f3", cf5); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Delete procedure && kill the executor + long procId = procExec.submitProcedure( + new DeleteColumnFamilyProcedure(procExec.getEnvironment(), tableName, cf5.getBytes())); + + // Failing after DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT we should not trigger the rollback. + // NOTE: the 4 (number of DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT + 1 step) is hardcoded, + // so you have to look at this test at least once when you add a new step. + int numberOfSteps = 4; + MasterProcedureTestingUtility.testRollbackAndDoubleExecutionAfterPONR( + procExec, + procId, + numberOfSteps, + DeleteColumnFamilyState.values()); + + MasterProcedureTestingUtility.validateColumnFamilyDeletion( + UTIL.getHBaseCluster().getMaster(), tableName, cf5); + } + + private ProcedureExecutor getMasterProcedureExecutor() { + return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyColumnFamilyProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyColumnFamilyProcedure.java new file mode 100644 index 00000000000..ea8882f1bf4 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyColumnFamilyProcedure.java @@ -0,0 +1,237 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureResult;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyState;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestModifyColumnFamilyProcedure {
+  private static final Log LOG = LogFactory.getLog(TestModifyColumnFamilyProcedure.class);
+
+  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  private static void setupConf(Configuration conf) {
+    conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+  }
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    setupConf(UTIL.getConfiguration());
+    UTIL.startMiniCluster(1);
+  }
+
+  @AfterClass
+  public static void cleanupTest() throws Exception {
+    try {
+      UTIL.shutdownMiniCluster();
+    } catch (Exception e) {
+      LOG.warn("failure shutting down cluster", e);
+    }
+  }
+
+  @Before
+  public void setup() throws Exception {
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+    for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
+      LOG.info("Tear down, remove table=" + htd.getTableName());
+      UTIL.deleteTable(htd.getTableName());
+    }
+  }
+
+  @Test(timeout = 60000)
+  public void testModifyColumnFamily() throws Exception {
+    final TableName tableName = TableName.valueOf("testModifyColumnFamily");
+    final String cf1 = "cf1";
+    final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf1);
+    int oldBlockSize = columnDescriptor.getBlocksize();
+    int newBlockSize = 3 * oldBlockSize;
+
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+    MasterProcedureTestingUtility.createTable(procExec, tableName, null, cf1, "f2");
+
+    // Test 1: modify the column family online
+    columnDescriptor.setBlocksize(newBlockSize);
+    long procId1 = procExec.submitProcedure(new ModifyColumnFamilyProcedure(
+      procExec.getEnvironment(), tableName, columnDescriptor));
+    // Wait for the procedure to complete
+    ProcedureTestingUtility.waitProcedure(procExec, procId1);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
+    MasterProcedureTestingUtility.validateColumnFamilyModification(UTIL.getHBaseCluster()
+      .getMaster(), tableName, cf1, columnDescriptor);
+
+    // Test 2: modify the column family offline
+    UTIL.getHBaseAdmin().disableTable(tableName);
+    columnDescriptor.setBlocksize(newBlockSize * 2);
+    long procId2 =
+      procExec.submitProcedure(new ModifyColumnFamilyProcedure(procExec.getEnvironment(),
+        tableName, columnDescriptor));
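+    // The table was disabled above, so this second modification exercises the offline path.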
+    // Wait for the procedure to complete
+    ProcedureTestingUtility.waitProcedure(procExec, procId2);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
+    MasterProcedureTestingUtility.validateColumnFamilyModification(UTIL.getHBaseCluster()
+      .getMaster(), tableName, cf1, columnDescriptor);
+  }
+
+  @Test(timeout=60000)
+  public void testModifyNonExistingColumnFamily() throws Exception {
+    final TableName tableName = TableName.valueOf("testModifyNonExistingColumnFamily");
+    final String cf2 = "cf2";
+    final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf2);
+    int oldBlockSize = columnDescriptor.getBlocksize();
+    int newBlockSize = 2 * oldBlockSize;
+
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+    MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1");
+
+    // Modify a column family that does not exist
+    columnDescriptor.setBlocksize(newBlockSize);
+    long procId1 = procExec.submitProcedure(new ModifyColumnFamilyProcedure(
+      procExec.getEnvironment(), tableName, columnDescriptor));
+    // Wait for the procedure to complete
+    ProcedureTestingUtility.waitProcedure(procExec, procId1);
+
+    ProcedureResult result = procExec.getResult(procId1);
+    assertTrue(result.isFailed());
+    LOG.debug("Modify failed with exception: " + result.getException());
+    assertTrue(result.getException().getCause() instanceof InvalidFamilyOperationException);
+  }
+
+  @Test(timeout=60000)
+  public void testRecoveryAndDoubleExecutionOffline() throws Exception {
+    final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOffline");
+    final String cf3 = "cf3";
+    final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf3);
+    int oldBlockSize = columnDescriptor.getBlocksize();
+    int newBlockSize = 4 * oldBlockSize;
+
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+    // create the table
+    MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", cf3);
+    UTIL.getHBaseAdmin().disableTable(tableName);
+    ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+    // Start the Modify procedure && kill the executor
+    columnDescriptor.setBlocksize(newBlockSize);
+    long procId = procExec.submitProcedure(new ModifyColumnFamilyProcedure(
+      procExec.getEnvironment(), tableName, columnDescriptor));
+
+    // Restart the executor and execute the step twice
+    int numberOfSteps = ModifyColumnFamilyState.values().length;
+    MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
+      procExec,
+      procId,
+      numberOfSteps,
+      ModifyColumnFamilyState.values());
+
+    MasterProcedureTestingUtility.validateColumnFamilyModification(UTIL.getHBaseCluster()
+      .getMaster(), tableName, cf3, columnDescriptor);
+  }
+
+  @Test(timeout = 60000)
+  public void testRecoveryAndDoubleExecutionOnline() throws Exception {
+    final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOnline");
+    final String cf4 = "cf4";
+    final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf4);
+    int oldBlockSize = columnDescriptor.getBlocksize();
+    int newBlockSize = 4 * oldBlockSize;
+
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+    // create the table
+    MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", cf4);
+    ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+    // Start the Modify procedure && kill the executor
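+    // The kill flag set above aborts the executor before each state is persisted; the restart in
+    // testRecoveryAndDoubleExecution() then re-runs each step, verifying the steps are idempotent.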
+    columnDescriptor.setBlocksize(newBlockSize);
+    long procId =
+      procExec.submitProcedure(new ModifyColumnFamilyProcedure(procExec.getEnvironment(),
+        tableName, columnDescriptor));
+
+    // Restart the executor and execute the step twice
+    int numberOfSteps = ModifyColumnFamilyState.values().length;
+    MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps,
+      ModifyColumnFamilyState.values());
+
+    MasterProcedureTestingUtility.validateColumnFamilyModification(UTIL.getHBaseCluster()
+      .getMaster(), tableName, cf4, columnDescriptor);
+  }
+
+  @Test(timeout = 60000)
+  public void testRollbackAndDoubleExecution() throws Exception {
+    final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution");
+    final String cf3 = "cf3";
+    final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf3);
+    int oldBlockSize = columnDescriptor.getBlocksize();
+    int newBlockSize = 4 * oldBlockSize;
+
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+    // create the table
+    MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", cf3);
+    ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+    // Start the Modify procedure && kill the executor
+    columnDescriptor.setBlocksize(newBlockSize);
+    long procId = procExec.submitProcedure(new ModifyColumnFamilyProcedure(
+      procExec.getEnvironment(), tableName, columnDescriptor));
+
+    // Fail in the middle of the procedure
+    int numberOfSteps = ModifyColumnFamilyState.values().length - 2;
+    MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
+      procExec,
+      procId,
+      numberOfSteps,
+      ModifyColumnFamilyState.values());
+  }
+
+  private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+    return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+  }
+}