diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
index d83ee19702f..e0a4775aaee 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
@@ -253,6 +253,133 @@ public final class MasterProcedureProtos {
// @@protoc_insertion_point(enum_scope:ModifyTableState)
}
+ /**
+ * Protobuf enum {@code TruncateTableState}
+ */
+ public enum TruncateTableState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * TRUNCATE_TABLE_PRE_OPERATION = 1;
+ */
+ TRUNCATE_TABLE_PRE_OPERATION(0, 1),
+ /**
+ * TRUNCATE_TABLE_REMOVE_FROM_META = 2;
+ */
+ TRUNCATE_TABLE_REMOVE_FROM_META(1, 2),
+ /**
+ * TRUNCATE_TABLE_CLEAR_FS_LAYOUT = 3;
+ */
+ TRUNCATE_TABLE_CLEAR_FS_LAYOUT(2, 3),
+ /**
+ * TRUNCATE_TABLE_CREATE_FS_LAYOUT = 4;
+ */
+ TRUNCATE_TABLE_CREATE_FS_LAYOUT(3, 4),
+ /**
+ * TRUNCATE_TABLE_ADD_TO_META = 5;
+ */
+ TRUNCATE_TABLE_ADD_TO_META(4, 5),
+ /**
+ * TRUNCATE_TABLE_ASSIGN_REGIONS = 6;
+ */
+ TRUNCATE_TABLE_ASSIGN_REGIONS(5, 6),
+ /**
+ * TRUNCATE_TABLE_POST_OPERATION = 7;
+ */
+ TRUNCATE_TABLE_POST_OPERATION(6, 7),
+ ;
+
+ /**
+ * TRUNCATE_TABLE_PRE_OPERATION = 1;
+ */
+ public static final int TRUNCATE_TABLE_PRE_OPERATION_VALUE = 1;
+ /**
+ * TRUNCATE_TABLE_REMOVE_FROM_META = 2;
+ */
+ public static final int TRUNCATE_TABLE_REMOVE_FROM_META_VALUE = 2;
+ /**
+ * TRUNCATE_TABLE_CLEAR_FS_LAYOUT = 3;
+ */
+ public static final int TRUNCATE_TABLE_CLEAR_FS_LAYOUT_VALUE = 3;
+ /**
+ * TRUNCATE_TABLE_CREATE_FS_LAYOUT = 4;
+ */
+ public static final int TRUNCATE_TABLE_CREATE_FS_LAYOUT_VALUE = 4;
+ /**
+ * TRUNCATE_TABLE_ADD_TO_META = 5;
+ */
+ public static final int TRUNCATE_TABLE_ADD_TO_META_VALUE = 5;
+ /**
+ * TRUNCATE_TABLE_ASSIGN_REGIONS = 6;
+ */
+ public static final int TRUNCATE_TABLE_ASSIGN_REGIONS_VALUE = 6;
+ /**
+ * TRUNCATE_TABLE_POST_OPERATION = 7;
+ */
+ public static final int TRUNCATE_TABLE_POST_OPERATION_VALUE = 7;
+
+
+ public final int getNumber() { return value; }
+
+ public static TruncateTableState valueOf(int value) {
+ switch (value) {
+ case 1: return TRUNCATE_TABLE_PRE_OPERATION;
+ case 2: return TRUNCATE_TABLE_REMOVE_FROM_META;
+ case 3: return TRUNCATE_TABLE_CLEAR_FS_LAYOUT;
+ case 4: return TRUNCATE_TABLE_CREATE_FS_LAYOUT;
+ case 5: return TRUNCATE_TABLE_ADD_TO_META;
+ case 6: return TRUNCATE_TABLE_ASSIGN_REGIONS;
+ case 7: return TRUNCATE_TABLE_POST_OPERATION;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<TruncateTableState>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<TruncateTableState>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<TruncateTableState>() {
+ public TruncateTableState findValueByNumber(int number) {
+ return TruncateTableState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(2);
+ }
+
+ private static final TruncateTableState[] VALUES = values();
+
+ public static TruncateTableState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private TruncateTableState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:TruncateTableState)
+ }
+
/**
* Protobuf enum {@code DeleteTableState}
*/
@@ -346,7 +473,7 @@ public final class MasterProcedureProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(2);
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(3);
}
private static final DeleteTableState[] VALUES = values();
@@ -455,7 +582,7 @@ public final class MasterProcedureProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(3);
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(4);
}
private static final AddColumnFamilyState[] VALUES = values();
@@ -564,7 +691,7 @@ public final class MasterProcedureProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(4);
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(5);
}
private static final ModifyColumnFamilyState[] VALUES = values();
@@ -682,7 +809,7 @@ public final class MasterProcedureProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(5);
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(6);
}
private static final DeleteColumnFamilyState[] VALUES = values();
@@ -800,7 +927,7 @@ public final class MasterProcedureProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(6);
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(7);
}
private static final EnableTableState[] VALUES = values();
@@ -918,7 +1045,7 @@ public final class MasterProcedureProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(7);
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(8);
}
private static final DisableTableState[] VALUES = values();
@@ -3190,6 +3317,1471 @@ public final class MasterProcedureProtos {
// @@protoc_insertion_point(class_scope:ModifyTableStateData)
}
+ public interface TruncateTableStateDataOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .UserInformation user_info = 1;
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ boolean hasUserInfo();
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo();
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder();
+
+ // required bool preserve_splits = 2;
+ /**
+ * required bool preserve_splits = 2;
+ */
+ boolean hasPreserveSplits();
+ /**
+ * required bool preserve_splits = 2;
+ */
+ boolean getPreserveSplits();
+
+ // optional .TableName table_name = 3;
+ /**
+ * optional .TableName table_name = 3;
+ */
+ boolean hasTableName();
+ /**
+ * optional .TableName table_name = 3;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+ /**
+ * optional .TableName table_name = 3;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+
+ // optional .TableSchema table_schema = 4;
+ /**
+ * optional .TableSchema table_schema = 4;
+ */
+ boolean hasTableSchema();
+ /**
+ * optional .TableSchema table_schema = 4;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema();
+ /**
+ * optional .TableSchema table_schema = 4;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder();
+
+ // repeated .RegionInfo region_info = 5;
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>
+ getRegionInfoList();
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index);
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ int getRegionInfoCount();
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList();
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code TruncateTableStateData}
+ */
+ public static final class TruncateTableStateData extends
+ com.google.protobuf.GeneratedMessage
+ implements TruncateTableStateDataOrBuilder {
+ // Use TruncateTableStateData.newBuilder() to construct.
+ private TruncateTableStateData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private TruncateTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final TruncateTableStateData defaultInstance;
+ public static TruncateTableStateData getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TruncateTableStateData getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private TruncateTableStateData(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = userInfo_.toBuilder();
+ }
+ userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(userInfo_);
+ userInfo_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ preserveSplits_ = input.readBool();
+ break;
+ }
+ case 26: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ subBuilder = tableName_.toBuilder();
+ }
+ tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(tableName_);
+ tableName_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000004;
+ break;
+ }
+ case 34: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ subBuilder = tableSchema_.toBuilder();
+ }
+ tableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(tableSchema_);
+ tableSchema_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000008;
+ break;
+ }
+ case 42: {
+ if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
+ regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>();
+ mutable_bitField0_ |= 0x00000010;
+ }
+ regionInfo_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
+ regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_TruncateTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_TruncateTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<TruncateTableStateData> PARSER =
+ new com.google.protobuf.AbstractParser<TruncateTableStateData>() {
+ public TruncateTableStateData parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new TruncateTableStateData(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<TruncateTableStateData> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .UserInformation user_info = 1;
+ public static final int USER_INFO_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_;
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ return userInfo_;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ return userInfo_;
+ }
+
+ // required bool preserve_splits = 2;
+ public static final int PRESERVE_SPLITS_FIELD_NUMBER = 2;
+ private boolean preserveSplits_;
+ /**
+ * required bool preserve_splits = 2;
+ */
+ public boolean hasPreserveSplits() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required bool preserve_splits = 2;
+ */
+ public boolean getPreserveSplits() {
+ return preserveSplits_;
+ }
+
+ // optional .TableName table_name = 3;
+ public static final int TABLE_NAME_FIELD_NUMBER = 3;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+ /**
+ * optional .TableName table_name = 3;
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional .TableName table_name = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ return tableName_;
+ }
+ /**
+ * optional .TableName table_name = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ return tableName_;
+ }
+
+ // optional .TableSchema table_schema = 4;
+ public static final int TABLE_SCHEMA_FIELD_NUMBER = 4;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema tableSchema_;
+ /**
+ * optional .TableSchema table_schema = 4;
+ */
+ public boolean hasTableSchema() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional .TableSchema table_schema = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema() {
+ return tableSchema_;
+ }
+ /**
+ * optional .TableSchema table_schema = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder() {
+ return tableSchema_;
+ }
+
+ // repeated .RegionInfo region_info = 5;
+ public static final int REGION_INFO_FIELD_NUMBER = 5;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_;
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
+ return regionInfo_;
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList() {
+ return regionInfo_;
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public int getRegionInfoCount() {
+ return regionInfo_.size();
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
+ return regionInfo_.get(index);
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index) {
+ return regionInfo_.get(index);
+ }
+
+ private void initFields() {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ preserveSplits_ = false;
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ regionInfo_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasUserInfo()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasPreserveSplits()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (hasTableName()) {
+ if (!getTableName().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ if (hasTableSchema()) {
+ if (!getTableSchema().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ for (int i = 0; i < getRegionInfoCount(); i++) {
+ if (!getRegionInfo(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBool(2, preserveSplits_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeMessage(3, tableName_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeMessage(4, tableSchema_);
+ }
+ for (int i = 0; i < regionInfo_.size(); i++) {
+ output.writeMessage(5, regionInfo_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(2, preserveSplits_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, tableName_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(4, tableSchema_);
+ }
+ for (int i = 0; i < regionInfo_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(5, regionInfo_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData) obj;
+
+ boolean result = true;
+ result = result && (hasUserInfo() == other.hasUserInfo());
+ if (hasUserInfo()) {
+ result = result && getUserInfo()
+ .equals(other.getUserInfo());
+ }
+ result = result && (hasPreserveSplits() == other.hasPreserveSplits());
+ if (hasPreserveSplits()) {
+ result = result && (getPreserveSplits()
+ == other.getPreserveSplits());
+ }
+ result = result && (hasTableName() == other.hasTableName());
+ if (hasTableName()) {
+ result = result && getTableName()
+ .equals(other.getTableName());
+ }
+ result = result && (hasTableSchema() == other.hasTableSchema());
+ if (hasTableSchema()) {
+ result = result && getTableSchema()
+ .equals(other.getTableSchema());
+ }
+ result = result && getRegionInfoList()
+ .equals(other.getRegionInfoList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasUserInfo()) {
+ hash = (37 * hash) + USER_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getUserInfo().hashCode();
+ }
+ if (hasPreserveSplits()) {
+ hash = (37 * hash) + PRESERVE_SPLITS_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getPreserveSplits());
+ }
+ if (hasTableName()) {
+ hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getTableName().hashCode();
+ }
+ if (hasTableSchema()) {
+ hash = (37 * hash) + TABLE_SCHEMA_FIELD_NUMBER;
+ hash = (53 * hash) + getTableSchema().hashCode();
+ }
+ if (getRegionInfoCount() > 0) {
+ hash = (37 * hash) + REGION_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getRegionInfoList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code TruncateTableStateData}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateDataOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_TruncateTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_TruncateTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getUserInfoFieldBuilder();
+ getTableNameFieldBuilder();
+ getTableSchemaFieldBuilder();
+ getRegionInfoFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ preserveSplits_ = false;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ if (tableSchemaBuilder_ == null) {
+ tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ } else {
+ tableSchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000008);
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000010);
+ } else {
+ regionInfoBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_TruncateTableStateData_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (userInfoBuilder_ == null) {
+ result.userInfo_ = userInfo_;
+ } else {
+ result.userInfo_ = userInfoBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.preserveSplits_ = preserveSplits_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ if (tableNameBuilder_ == null) {
+ result.tableName_ = tableName_;
+ } else {
+ result.tableName_ = tableNameBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ if (tableSchemaBuilder_ == null) {
+ result.tableSchema_ = tableSchema_;
+ } else {
+ result.tableSchema_ = tableSchemaBuilder_.build();
+ }
+ if (regionInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
+ bitField0_ = (bitField0_ & ~0x00000010);
+ }
+ result.regionInfo_ = regionInfo_;
+ } else {
+ result.regionInfo_ = regionInfoBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.getDefaultInstance()) return this;
+ if (other.hasUserInfo()) {
+ mergeUserInfo(other.getUserInfo());
+ }
+ if (other.hasPreserveSplits()) {
+ setPreserveSplits(other.getPreserveSplits());
+ }
+ if (other.hasTableName()) {
+ mergeTableName(other.getTableName());
+ }
+ if (other.hasTableSchema()) {
+ mergeTableSchema(other.getTableSchema());
+ }
+ if (regionInfoBuilder_ == null) {
+ if (!other.regionInfo_.isEmpty()) {
+ if (regionInfo_.isEmpty()) {
+ regionInfo_ = other.regionInfo_;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ } else {
+ ensureRegionInfoIsMutable();
+ regionInfo_.addAll(other.regionInfo_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.regionInfo_.isEmpty()) {
+ if (regionInfoBuilder_.isEmpty()) {
+ regionInfoBuilder_.dispose();
+ regionInfoBuilder_ = null;
+ regionInfo_ = other.regionInfo_;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ regionInfoBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getRegionInfoFieldBuilder() : null;
+ } else {
+ regionInfoBuilder_.addAllMessages(other.regionInfo_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasUserInfo()) {
+
+ return false;
+ }
+ if (!hasPreserveSplits()) {
+
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+
+ return false;
+ }
+ if (hasTableName()) {
+ if (!getTableName().isInitialized()) {
+
+ return false;
+ }
+ }
+ if (hasTableSchema()) {
+ if (!getTableSchema().isInitialized()) {
+
+ return false;
+ }
+ }
+ for (int i = 0; i < getRegionInfoCount(); i++) {
+ if (!getRegionInfo(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .UserInformation user_info = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_;
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ if (userInfoBuilder_ == null) {
+ return userInfo_;
+ } else {
+ return userInfoBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ userInfo_ = value;
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder setUserInfo(
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = builderForValue.build();
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) {
+ userInfo_ =
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial();
+ } else {
+ userInfo_ = value;
+ }
+ onChanged();
+ } else {
+ userInfoBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder clearUserInfo() {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ onChanged();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getUserInfoFieldBuilder().getBuilder();
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ if (userInfoBuilder_ != null) {
+ return userInfoBuilder_.getMessageOrBuilder();
+ } else {
+ return userInfo_;
+ }
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>
+ getUserInfoFieldBuilder() {
+ if (userInfoBuilder_ == null) {
+ userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>(
+ userInfo_,
+ getParentForChildren(),
+ isClean());
+ userInfo_ = null;
+ }
+ return userInfoBuilder_;
+ }
+
+ // required bool preserve_splits = 2;
+ private boolean preserveSplits_ ;
+ /**
+ * required bool preserve_splits = 2;
+ */
+ public boolean hasPreserveSplits() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required bool preserve_splits = 2;
+ */
+ public boolean getPreserveSplits() {
+ return preserveSplits_;
+ }
+ /**
+ * required bool preserve_splits = 2;
+ */
+ public Builder setPreserveSplits(boolean value) {
+ bitField0_ |= 0x00000002;
+ preserveSplits_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required bool preserve_splits = 2;
+ */
+ public Builder clearPreserveSplits() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ preserveSplits_ = false;
+ onChanged();
+ return this;
+ }
+
+ // optional .TableName table_name = 3;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
+ /**
+ * optional .TableName table_name = 3;
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional .TableName table_name = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ if (tableNameBuilder_ == null) {
+ return tableName_;
+ } else {
+ return tableNameBuilder_.getMessage();
+ }
+ }
+ /**
+ * optional .TableName table_name = 3;
+ */
+ public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ tableName_ = value;
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * optional .TableName table_name = 3;
+ */
+ public Builder setTableName(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableNameBuilder_ == null) {
+ tableName_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * optional .TableName table_name = 3;
+ */
+ public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004) &&
+ tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+ tableName_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+ } else {
+ tableName_ = value;
+ }
+ onChanged();
+ } else {
+ tableNameBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * optional .TableName table_name = 3;
+ */
+ public Builder clearTableName() {
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ onChanged();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+ /**
+ * optional .TableName table_name = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
+ bitField0_ |= 0x00000004;
+ onChanged();
+ return getTableNameFieldBuilder().getBuilder();
+ }
+ /**
+ * optional .TableName table_name = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ if (tableNameBuilder_ != null) {
+ return tableNameBuilder_.getMessageOrBuilder();
+ } else {
+ return tableName_;
+ }
+ }
+ /**
+ * optional .TableName table_name = 3;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableNameFieldBuilder() {
+ if (tableNameBuilder_ == null) {
+ tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ tableName_,
+ getParentForChildren(),
+ isClean());
+ tableName_ = null;
+ }
+ return tableNameBuilder_;
+ }
+
+ // optional .TableSchema table_schema = 4;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> tableSchemaBuilder_;
+ /**
+ * optional .TableSchema table_schema = 4;
+ */
+ public boolean hasTableSchema() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional .TableSchema table_schema = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema() {
+ if (tableSchemaBuilder_ == null) {
+ return tableSchema_;
+ } else {
+ return tableSchemaBuilder_.getMessage();
+ }
+ }
+ /**
+ * optional .TableSchema table_schema = 4;
+ */
+ public Builder setTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (tableSchemaBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ tableSchema_ = value;
+ onChanged();
+ } else {
+ tableSchemaBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000008;
+ return this;
+ }
+ /**
+ * optional .TableSchema table_schema = 4;
+ */
+ public Builder setTableSchema(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) {
+ if (tableSchemaBuilder_ == null) {
+ tableSchema_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableSchemaBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000008;
+ return this;
+ }
+ /**
+ * optional .TableSchema table_schema = 4;
+ */
+ public Builder mergeTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (tableSchemaBuilder_ == null) {
+ if (((bitField0_ & 0x00000008) == 0x00000008) &&
+ tableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) {
+ tableSchema_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(tableSchema_).mergeFrom(value).buildPartial();
+ } else {
+ tableSchema_ = value;
+ }
+ onChanged();
+ } else {
+ tableSchemaBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000008;
+ return this;
+ }
+ /**
+ * optional .TableSchema table_schema = 4;
+ */
+ public Builder clearTableSchema() {
+ if (tableSchemaBuilder_ == null) {
+ tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ onChanged();
+ } else {
+ tableSchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000008);
+ return this;
+ }
+ /**
+ * optional .TableSchema table_schema = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getTableSchemaBuilder() {
+ bitField0_ |= 0x00000008;
+ onChanged();
+ return getTableSchemaFieldBuilder().getBuilder();
+ }
+ /**
+ * optional .TableSchema table_schema = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder() {
+ if (tableSchemaBuilder_ != null) {
+ return tableSchemaBuilder_.getMessageOrBuilder();
+ } else {
+ return tableSchema_;
+ }
+ }
+ /**
+ * optional .TableSchema table_schema = 4;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>
+ getTableSchemaFieldBuilder() {
+ if (tableSchemaBuilder_ == null) {
+ tableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
+ tableSchema_,
+ getParentForChildren(),
+ isClean());
+ tableSchema_ = null;
+ }
+ return tableSchemaBuilder_;
+ }
+
+ // repeated .RegionInfo region_info = 5;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_ =
+ java.util.Collections.emptyList();
+ private void ensureRegionInfoIsMutable() {
+ if (!((bitField0_ & 0x00000010) == 0x00000010)) {
+ regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>(regionInfo_);
+ bitField0_ |= 0x00000010;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_;
+
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
+ if (regionInfoBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(regionInfo_);
+ } else {
+ return regionInfoBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public int getRegionInfoCount() {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_.size();
+ } else {
+ return regionInfoBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_.get(index);
+ } else {
+ return regionInfoBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public Builder setRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionInfoIsMutable();
+ regionInfo_.set(index, value);
+ onChanged();
+ } else {
+ regionInfoBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public Builder setRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ regionInfoBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public Builder addRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(value);
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public Builder addRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(index, value);
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public Builder addRegionInfo(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(builderForValue.build());
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public Builder addRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public Builder addAllRegionInfo(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> values) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ super.addAll(values, regionInfo_);
+ onChanged();
+ } else {
+ regionInfoBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public Builder clearRegionInfo() {
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000010);
+ onChanged();
+ } else {
+ regionInfoBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public Builder removeRegionInfo(int index) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.remove(index);
+ onChanged();
+ } else {
+ regionInfoBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder(
+ int index) {
+ return getRegionInfoFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index) {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_.get(index); } else {
+ return regionInfoBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList() {
+ if (regionInfoBuilder_ != null) {
+ return regionInfoBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(regionInfo_);
+ }
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder() {
+ return getRegionInfoFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder(
+ int index) {
+ return getRegionInfoFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder>
+ getRegionInfoBuilderList() {
+ return getRegionInfoFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoFieldBuilder() {
+ if (regionInfoBuilder_ == null) {
+ regionInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
+ regionInfo_,
+ ((bitField0_ & 0x00000010) == 0x00000010),
+ getParentForChildren(),
+ isClean());
+ regionInfo_ = null;
+ }
+ return regionInfoBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:TruncateTableStateData)
+ }
+
+ static {
+ defaultInstance = new TruncateTableStateData(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:TruncateTableStateData)
+ }
+
public interface DeleteTableStateDataOrBuilder
extends com.google.protobuf.MessageOrBuilder {
@@ -9618,6 +11210,11 @@ public final class MasterProcedureProtos {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_ModifyTableStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_TruncateTableStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_TruncateTableStateData_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_DeleteTableStateData_descriptor;
private static
@@ -9666,81 +11263,92 @@ public final class MasterProcedureProtos {
"n\022-\n\027unmodified_table_schema\030\002 \001(\0132\014.Tab" +
"leSchema\022+\n\025modified_table_schema\030\003 \002(\0132" +
"\014.TableSchema\022&\n\036delete_column_family_in" +
- "_modify\030\004 \002(\010\"}\n\024DeleteTableStateData\022#\n",
- "\tuser_info\030\001 \002(\0132\020.UserInformation\022\036\n\nta" +
- "ble_name\030\002 \002(\0132\n.TableName\022 \n\013region_inf" +
- "o\030\003 \003(\0132\013.RegionInfo\"\300\001\n\030AddColumnFamily" +
+ "_modify\030\004 \002(\010\"\274\001\n\026TruncateTableStateData",
+ "\022#\n\tuser_info\030\001 \002(\0132\020.UserInformation\022\027\n" +
+ "\017preserve_splits\030\002 \002(\010\022\036\n\ntable_name\030\003 \001" +
+ "(\0132\n.TableName\022\"\n\014table_schema\030\004 \001(\0132\014.T" +
+ "ableSchema\022 \n\013region_info\030\005 \003(\0132\013.Region" +
+ "Info\"}\n\024DeleteTableStateData\022#\n\tuser_inf" +
+ "o\030\001 \002(\0132\020.UserInformation\022\036\n\ntable_name\030" +
+ "\002 \002(\0132\n.TableName\022 \n\013region_info\030\003 \003(\0132\013" +
+ ".RegionInfo\"\300\001\n\030AddColumnFamilyStateData" +
+ "\022#\n\tuser_info\030\001 \002(\0132\020.UserInformation\022\036\n" +
+ "\ntable_name\030\002 \002(\0132\n.TableName\0220\n\023columnf",
+ "amily_schema\030\003 \002(\0132\023.ColumnFamilySchema\022" +
+ "-\n\027unmodified_table_schema\030\004 \001(\0132\014.Table" +
+ "Schema\"\303\001\n\033ModifyColumnFamilyStateData\022#" +
+ "\n\tuser_info\030\001 \002(\0132\020.UserInformation\022\036\n\nt" +
+ "able_name\030\002 \002(\0132\n.TableName\0220\n\023columnfam" +
+ "ily_schema\030\003 \002(\0132\023.ColumnFamilySchema\022-\n" +
+ "\027unmodified_table_schema\030\004 \001(\0132\014.TableSc" +
+ "hema\"\254\001\n\033DeleteColumnFamilyStateData\022#\n\t" +
+ "user_info\030\001 \002(\0132\020.UserInformation\022\036\n\ntab" +
+ "le_name\030\002 \002(\0132\n.TableName\022\031\n\021columnfamil",
+ "y_name\030\003 \002(\014\022-\n\027unmodified_table_schema\030" +
+ "\004 \001(\0132\014.TableSchema\"{\n\024EnableTableStateD" +
+ "ata\022#\n\tuser_info\030\001 \002(\0132\020.UserInformation" +
+ "\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\022\036\n\026skip" +
+ "_table_state_check\030\003 \002(\010\"|\n\025DisableTable" +
"StateData\022#\n\tuser_info\030\001 \002(\0132\020.UserInfor" +
- "mation\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\0220" +
- "\n\023columnfamily_schema\030\003 \002(\0132\023.ColumnFami" +
- "lySchema\022-\n\027unmodified_table_schema\030\004 \001(" +
- "\0132\014.TableSchema\"\303\001\n\033ModifyColumnFamilySt" +
- "ateData\022#\n\tuser_info\030\001 \002(\0132\020.UserInforma" +
- "tion\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\0220\n\023",
- "columnfamily_schema\030\003 \002(\0132\023.ColumnFamily" +
- "Schema\022-\n\027unmodified_table_schema\030\004 \001(\0132" +
- "\014.TableSchema\"\254\001\n\033DeleteColumnFamilyStat" +
- "eData\022#\n\tuser_info\030\001 \002(\0132\020.UserInformati" +
- "on\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\022\031\n\021co" +
- "lumnfamily_name\030\003 \002(\014\022-\n\027unmodified_tabl" +
- "e_schema\030\004 \001(\0132\014.TableSchema\"{\n\024EnableTa" +
- "bleStateData\022#\n\tuser_info\030\001 \002(\0132\020.UserIn" +
- "formation\022\036\n\ntable_name\030\002 \002(\0132\n.TableNam" +
- "e\022\036\n\026skip_table_state_check\030\003 \002(\010\"|\n\025Dis",
- "ableTableStateData\022#\n\tuser_info\030\001 \002(\0132\020." +
- "UserInformation\022\036\n\ntable_name\030\002 \002(\0132\n.Ta" +
- "bleName\022\036\n\026skip_table_state_check\030\003 \002(\010*" +
- "\330\001\n\020CreateTableState\022\036\n\032CREATE_TABLE_PRE" +
- "_OPERATION\020\001\022 \n\034CREATE_TABLE_WRITE_FS_LA" +
- "YOUT\020\002\022\034\n\030CREATE_TABLE_ADD_TO_META\020\003\022\037\n\033" +
- "CREATE_TABLE_ASSIGN_REGIONS\020\004\022\"\n\036CREATE_" +
- "TABLE_UPDATE_DESC_CACHE\020\005\022\037\n\033CREATE_TABL" +
- "E_POST_OPERATION\020\006*\207\002\n\020ModifyTableState\022" +
- "\030\n\024MODIFY_TABLE_PREPARE\020\001\022\036\n\032MODIFY_TABL",
- "E_PRE_OPERATION\020\002\022(\n$MODIFY_TABLE_UPDATE" +
- "_TABLE_DESCRIPTOR\020\003\022&\n\"MODIFY_TABLE_REMO" +
- "VE_REPLICA_COLUMN\020\004\022!\n\035MODIFY_TABLE_DELE" +
- "TE_FS_LAYOUT\020\005\022\037\n\033MODIFY_TABLE_POST_OPER" +
- "ATION\020\006\022#\n\037MODIFY_TABLE_REOPEN_ALL_REGIO" +
- "NS\020\007*\337\001\n\020DeleteTableState\022\036\n\032DELETE_TABL" +
- "E_PRE_OPERATION\020\001\022!\n\035DELETE_TABLE_REMOVE" +
- "_FROM_META\020\002\022 \n\034DELETE_TABLE_CLEAR_FS_LA" +
- "YOUT\020\003\022\"\n\036DELETE_TABLE_UPDATE_DESC_CACHE" +
- "\020\004\022!\n\035DELETE_TABLE_UNASSIGN_REGIONS\020\005\022\037\n",
- "\033DELETE_TABLE_POST_OPERATION\020\006*\331\001\n\024AddCo" +
- "lumnFamilyState\022\035\n\031ADD_COLUMN_FAMILY_PRE" +
- "PARE\020\001\022#\n\037ADD_COLUMN_FAMILY_PRE_OPERATIO" +
- "N\020\002\022-\n)ADD_COLUMN_FAMILY_UPDATE_TABLE_DE" +
- "SCRIPTOR\020\003\022$\n ADD_COLUMN_FAMILY_POST_OPE" +
- "RATION\020\004\022(\n$ADD_COLUMN_FAMILY_REOPEN_ALL" +
- "_REGIONS\020\005*\353\001\n\027ModifyColumnFamilyState\022 " +
- "\n\034MODIFY_COLUMN_FAMILY_PREPARE\020\001\022&\n\"MODI" +
- "FY_COLUMN_FAMILY_PRE_OPERATION\020\002\0220\n,MODI" +
- "FY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR",
- "\020\003\022\'\n#MODIFY_COLUMN_FAMILY_POST_OPERATIO" +
- "N\020\004\022+\n\'MODIFY_COLUMN_FAMILY_REOPEN_ALL_R" +
- "EGIONS\020\005*\226\002\n\027DeleteColumnFamilyState\022 \n\034" +
- "DELETE_COLUMN_FAMILY_PREPARE\020\001\022&\n\"DELETE" +
- "_COLUMN_FAMILY_PRE_OPERATION\020\002\0220\n,DELETE" +
- "_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR\020\003" +
- "\022)\n%DELETE_COLUMN_FAMILY_DELETE_FS_LAYOU" +
- "T\020\004\022\'\n#DELETE_COLUMN_FAMILY_POST_OPERATI" +
- "ON\020\005\022+\n\'DELETE_COLUMN_FAMILY_REOPEN_ALL_" +
- "REGIONS\020\006*\350\001\n\020EnableTableState\022\030\n\024ENABLE",
- "_TABLE_PREPARE\020\001\022\036\n\032ENABLE_TABLE_PRE_OPE" +
- "RATION\020\002\022)\n%ENABLE_TABLE_SET_ENABLING_TA" +
- "BLE_STATE\020\003\022$\n ENABLE_TABLE_MARK_REGIONS" +
- "_ONLINE\020\004\022(\n$ENABLE_TABLE_SET_ENABLED_TA" +
- "BLE_STATE\020\005\022\037\n\033ENABLE_TABLE_POST_OPERATI" +
- "ON\020\006*\362\001\n\021DisableTableState\022\031\n\025DISABLE_TA" +
- "BLE_PREPARE\020\001\022\037\n\033DISABLE_TABLE_PRE_OPERA" +
- "TION\020\002\022+\n\'DISABLE_TABLE_SET_DISABLING_TA" +
- "BLE_STATE\020\003\022&\n\"DISABLE_TABLE_MARK_REGION" +
- "S_OFFLINE\020\004\022*\n&DISABLE_TABLE_SET_DISABLE",
- "D_TABLE_STATE\020\005\022 \n\034DISABLE_TABLE_POST_OP" +
- "ERATION\020\006BK\n*org.apache.hadoop.hbase.pro" +
- "tobuf.generatedB\025MasterProcedureProtosH\001" +
- "\210\001\001\240\001\001"
+ "mation\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\022\036" +
+ "\n\026skip_table_state_check\030\003 \002(\010*\330\001\n\020Creat" +
+ "eTableState\022\036\n\032CREATE_TABLE_PRE_OPERATIO" +
+ "N\020\001\022 \n\034CREATE_TABLE_WRITE_FS_LAYOUT\020\002\022\034\n",
+ "\030CREATE_TABLE_ADD_TO_META\020\003\022\037\n\033CREATE_TA" +
+ "BLE_ASSIGN_REGIONS\020\004\022\"\n\036CREATE_TABLE_UPD" +
+ "ATE_DESC_CACHE\020\005\022\037\n\033CREATE_TABLE_POST_OP" +
+ "ERATION\020\006*\207\002\n\020ModifyTableState\022\030\n\024MODIFY" +
+ "_TABLE_PREPARE\020\001\022\036\n\032MODIFY_TABLE_PRE_OPE" +
+ "RATION\020\002\022(\n$MODIFY_TABLE_UPDATE_TABLE_DE" +
+ "SCRIPTOR\020\003\022&\n\"MODIFY_TABLE_REMOVE_REPLIC" +
+ "A_COLUMN\020\004\022!\n\035MODIFY_TABLE_DELETE_FS_LAY" +
+ "OUT\020\005\022\037\n\033MODIFY_TABLE_POST_OPERATION\020\006\022#" +
+ "\n\037MODIFY_TABLE_REOPEN_ALL_REGIONS\020\007*\212\002\n\022",
+ "TruncateTableState\022 \n\034TRUNCATE_TABLE_PRE" +
+ "_OPERATION\020\001\022#\n\037TRUNCATE_TABLE_REMOVE_FR" +
+ "OM_META\020\002\022\"\n\036TRUNCATE_TABLE_CLEAR_FS_LAY" +
+ "OUT\020\003\022#\n\037TRUNCATE_TABLE_CREATE_FS_LAYOUT" +
+ "\020\004\022\036\n\032TRUNCATE_TABLE_ADD_TO_META\020\005\022!\n\035TR" +
+ "UNCATE_TABLE_ASSIGN_REGIONS\020\006\022!\n\035TRUNCAT" +
+ "E_TABLE_POST_OPERATION\020\007*\337\001\n\020DeleteTable" +
+ "State\022\036\n\032DELETE_TABLE_PRE_OPERATION\020\001\022!\n" +
+ "\035DELETE_TABLE_REMOVE_FROM_META\020\002\022 \n\034DELE" +
+ "TE_TABLE_CLEAR_FS_LAYOUT\020\003\022\"\n\036DELETE_TAB",
+ "LE_UPDATE_DESC_CACHE\020\004\022!\n\035DELETE_TABLE_U" +
+ "NASSIGN_REGIONS\020\005\022\037\n\033DELETE_TABLE_POST_O" +
+ "PERATION\020\006*\331\001\n\024AddColumnFamilyState\022\035\n\031A" +
+ "DD_COLUMN_FAMILY_PREPARE\020\001\022#\n\037ADD_COLUMN" +
+ "_FAMILY_PRE_OPERATION\020\002\022-\n)ADD_COLUMN_FA" +
+ "MILY_UPDATE_TABLE_DESCRIPTOR\020\003\022$\n ADD_CO" +
+ "LUMN_FAMILY_POST_OPERATION\020\004\022(\n$ADD_COLU" +
+ "MN_FAMILY_REOPEN_ALL_REGIONS\020\005*\353\001\n\027Modif" +
+ "yColumnFamilyState\022 \n\034MODIFY_COLUMN_FAMI" +
+ "LY_PREPARE\020\001\022&\n\"MODIFY_COLUMN_FAMILY_PRE",
+ "_OPERATION\020\002\0220\n,MODIFY_COLUMN_FAMILY_UPD" +
+ "ATE_TABLE_DESCRIPTOR\020\003\022\'\n#MODIFY_COLUMN_" +
+ "FAMILY_POST_OPERATION\020\004\022+\n\'MODIFY_COLUMN" +
+ "_FAMILY_REOPEN_ALL_REGIONS\020\005*\226\002\n\027DeleteC" +
+ "olumnFamilyState\022 \n\034DELETE_COLUMN_FAMILY" +
+ "_PREPARE\020\001\022&\n\"DELETE_COLUMN_FAMILY_PRE_O" +
+ "PERATION\020\002\0220\n,DELETE_COLUMN_FAMILY_UPDAT" +
+ "E_TABLE_DESCRIPTOR\020\003\022)\n%DELETE_COLUMN_FA" +
+ "MILY_DELETE_FS_LAYOUT\020\004\022\'\n#DELETE_COLUMN" +
+ "_FAMILY_POST_OPERATION\020\005\022+\n\'DELETE_COLUM",
+ "N_FAMILY_REOPEN_ALL_REGIONS\020\006*\350\001\n\020Enable" +
+ "TableState\022\030\n\024ENABLE_TABLE_PREPARE\020\001\022\036\n\032" +
+ "ENABLE_TABLE_PRE_OPERATION\020\002\022)\n%ENABLE_T" +
+ "ABLE_SET_ENABLING_TABLE_STATE\020\003\022$\n ENABL" +
+ "E_TABLE_MARK_REGIONS_ONLINE\020\004\022(\n$ENABLE_" +
+ "TABLE_SET_ENABLED_TABLE_STATE\020\005\022\037\n\033ENABL" +
+ "E_TABLE_POST_OPERATION\020\006*\362\001\n\021DisableTabl" +
+ "eState\022\031\n\025DISABLE_TABLE_PREPARE\020\001\022\037\n\033DIS" +
+ "ABLE_TABLE_PRE_OPERATION\020\002\022+\n\'DISABLE_TA" +
+ "BLE_SET_DISABLING_TABLE_STATE\020\003\022&\n\"DISAB",
+ "LE_TABLE_MARK_REGIONS_OFFLINE\020\004\022*\n&DISAB" +
+ "LE_TABLE_SET_DISABLED_TABLE_STATE\020\005\022 \n\034D" +
+ "ISABLE_TABLE_POST_OPERATION\020\006BK\n*org.apa" +
+ "che.hadoop.hbase.protobuf.generatedB\025Mas" +
+ "terProcedureProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -9759,38 +11367,44 @@ public final class MasterProcedureProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ModifyTableStateData_descriptor,
new java.lang.String[] { "UserInfo", "UnmodifiedTableSchema", "ModifiedTableSchema", "DeleteColumnFamilyInModify", });
- internal_static_DeleteTableStateData_descriptor =
+ internal_static_TruncateTableStateData_descriptor =
getDescriptor().getMessageTypes().get(2);
+ internal_static_TruncateTableStateData_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_TruncateTableStateData_descriptor,
+ new java.lang.String[] { "UserInfo", "PreserveSplits", "TableName", "TableSchema", "RegionInfo", });
+ internal_static_DeleteTableStateData_descriptor =
+ getDescriptor().getMessageTypes().get(3);
internal_static_DeleteTableStateData_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_DeleteTableStateData_descriptor,
new java.lang.String[] { "UserInfo", "TableName", "RegionInfo", });
internal_static_AddColumnFamilyStateData_descriptor =
- getDescriptor().getMessageTypes().get(3);
+ getDescriptor().getMessageTypes().get(4);
internal_static_AddColumnFamilyStateData_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_AddColumnFamilyStateData_descriptor,
new java.lang.String[] { "UserInfo", "TableName", "ColumnfamilySchema", "UnmodifiedTableSchema", });
internal_static_ModifyColumnFamilyStateData_descriptor =
- getDescriptor().getMessageTypes().get(4);
+ getDescriptor().getMessageTypes().get(5);
internal_static_ModifyColumnFamilyStateData_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ModifyColumnFamilyStateData_descriptor,
new java.lang.String[] { "UserInfo", "TableName", "ColumnfamilySchema", "UnmodifiedTableSchema", });
internal_static_DeleteColumnFamilyStateData_descriptor =
- getDescriptor().getMessageTypes().get(5);
+ getDescriptor().getMessageTypes().get(6);
internal_static_DeleteColumnFamilyStateData_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_DeleteColumnFamilyStateData_descriptor,
new java.lang.String[] { "UserInfo", "TableName", "ColumnfamilyName", "UnmodifiedTableSchema", });
internal_static_EnableTableStateData_descriptor =
- getDescriptor().getMessageTypes().get(6);
+ getDescriptor().getMessageTypes().get(7);
internal_static_EnableTableStateData_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_EnableTableStateData_descriptor,
new java.lang.String[] { "UserInfo", "TableName", "SkipTableStateCheck", });
internal_static_DisableTableStateData_descriptor =
- getDescriptor().getMessageTypes().get(7);
+ getDescriptor().getMessageTypes().get(8);
internal_static_DisableTableStateData_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_DisableTableStateData_descriptor,
diff --git a/hbase-protocol/src/main/protobuf/MasterProcedure.proto b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
index a9ad0e0ac3d..e1c6880838f 100644
--- a/hbase-protocol/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
@@ -75,6 +75,24 @@ message ModifyTableStateData {
required bool delete_column_family_in_modify = 4;
}
+enum TruncateTableState {
+ TRUNCATE_TABLE_PRE_OPERATION = 1;
+ TRUNCATE_TABLE_REMOVE_FROM_META = 2;
+ TRUNCATE_TABLE_CLEAR_FS_LAYOUT = 3;
+ TRUNCATE_TABLE_CREATE_FS_LAYOUT = 4;
+ TRUNCATE_TABLE_ADD_TO_META = 5;
+ TRUNCATE_TABLE_ASSIGN_REGIONS = 6;
+ TRUNCATE_TABLE_POST_OPERATION = 7;
+}
+
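+// Serialized state of a TruncateTableProcedure: only table_name is set until the
+// table schema has been captured; after that table_schema is stored instead, and
+// region_info carries the regions being recreated (see TruncateTableProcedure#serializeStateData).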
+message TruncateTableStateData {
+ required UserInformation user_info = 1;
+ required bool preserve_splits = 2;
+ optional TableName table_name = 3;
+ optional TableSchema table_schema = 4;
+ repeated RegionInfo region_info = 5;
+}
+
enum DeleteTableState {
DELETE_TABLE_PRE_OPERATION = 1;
DELETE_TABLE_REMOVE_FROM_META = 2;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index fdbc31ca783..45bcdcb5cbe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -88,7 +88,6 @@ import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
-import org.apache.hadoop.hbase.master.handler.TruncateTableHandler;
import org.apache.hadoop.hbase.master.procedure.AddColumnFamilyProcedure;
import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteColumnFamilyProcedure;
@@ -101,6 +100,7 @@ import org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure;
import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
+import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
@@ -1599,9 +1599,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
cpHost.preTruncateTable(tableName);
}
LOG.info(getClientIdAuditPrefix() + " truncate " + tableName);
- TruncateTableHandler handler = new TruncateTableHandler(tableName, this, this, preserveSplits);
- handler.prepare();
- handler.process();
+
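+ // Run the truncate through the procedure framework and block until the
+ // procedure completes, keeping this call synchronous as with the old handler.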
+ long procId = this.procedureExecutor.submitProcedure(
+ new TruncateTableProcedure(procedureExecutor.getEnvironment(), tableName, preserveSplits));
+ ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
+
if (cpHost != null) {
cpHost.postTruncateTable(tableName);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index 84e9bef4179..2582a1e0071 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -106,14 +106,15 @@ public class DeleteTableProcedure
return Flow.NO_MORE_STATE;
}
- preDelete(env);
-
// TODO: Move out... in the acquireLock()
LOG.debug("waiting for '" + getTableName() + "' regions in transition");
regions = ProcedureSyncWait.getRegionsFromMeta(env, getTableName());
assert regions != null && !regions.isEmpty() : "unexpected 0 regions";
ProcedureSyncWait.waitRegionInTransition(env, regions);
+ // Call coprocessors
+ preDelete(env);
+
setNextState(DeleteTableState.DELETE_TABLE_REMOVE_FROM_META);
break;
case DELETE_TABLE_REMOVE_FROM_META:
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
new file mode 100644
index 00000000000..5ef0a19905b
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
@@ -0,0 +1,291 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.InputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.exceptions.HBaseException;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableState;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.util.ModifyRegionUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+
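+/**
+ * Procedure that truncates a table: the regions are removed from META and the
+ * filesystem layout is cleared, then the layout is recreated (optionally
+ * preserving the original split points), the table is re-added to META and
+ * its regions are re-assigned.
+ */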
+@InterfaceAudience.Private
+public class TruncateTableProcedure
+ extends StateMachineProcedure<MasterProcedureEnv, TruncateTableState>
+ implements TableProcedureInterface {
+ private static final Log LOG = LogFactory.getLog(TruncateTableProcedure.class);
+
+ private boolean preserveSplits;
+ private List<HRegionInfo> regions;
+ private UserGroupInformation user;
+ private HTableDescriptor hTableDescriptor;
+ private TableName tableName;
+
+ public TruncateTableProcedure() {
+ // Required by the Procedure framework to create the procedure on replay
+ }
+
+ public TruncateTableProcedure(final MasterProcedureEnv env, final TableName tableName,
+ boolean preserveSplits) throws IOException {
+ this.tableName = tableName;
+ this.preserveSplits = preserveSplits;
+ this.user = env.getRequestUser().getUGI();
+ }
+
+ @Override
+ protected Flow executeFromState(final MasterProcedureEnv env, TruncateTableState state) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+ try {
+ switch (state) {
+ case TRUNCATE_TABLE_PRE_OPERATION:
+ // Verify if we can truncate the table
+ if (!prepareTruncate(env)) {
+ assert isFailed() : "the truncate should have an exception here";
+ return Flow.NO_MORE_STATE;
+ }
+
+ // TODO: Move out... in the acquireLock()
+ LOG.debug("waiting for '" + getTableName() + "' regions in transition");
+ regions = ProcedureSyncWait.getRegionsFromMeta(env, getTableName());
+ assert regions != null && !regions.isEmpty() : "unexpected 0 regions";
+ ProcedureSyncWait.waitRegionInTransition(env, regions);
+
+ // Call coprocessors
+ preTruncate(env);
+
+ setNextState(TruncateTableState.TRUNCATE_TABLE_REMOVE_FROM_META);
+ break;
+ case TRUNCATE_TABLE_REMOVE_FROM_META:
+ hTableDescriptor = env.getMasterServices().getTableDescriptors()
+ .getDescriptor(tableName).getHTableDescriptor();
+ DeleteTableProcedure.deleteFromMeta(env, getTableName(), regions);
+ DeleteTableProcedure.deleteAssignmentState(env, getTableName());
+ setNextState(TruncateTableState.TRUNCATE_TABLE_CLEAR_FS_LAYOUT);
+ break;
+ case TRUNCATE_TABLE_CLEAR_FS_LAYOUT:
+ DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true);
+ if (!preserveSplits) {
+ // if we are not preserving splits, generate a new single region
+ regions = Arrays.asList(ModifyRegionUtils.createHRegionInfos(hTableDescriptor, null));
+ }
+ setNextState(TruncateTableState.TRUNCATE_TABLE_CREATE_FS_LAYOUT);
+ break;
+ case TRUNCATE_TABLE_CREATE_FS_LAYOUT:
+ regions = CreateTableProcedure.createFsLayout(env, hTableDescriptor, regions);
+ CreateTableProcedure.updateTableDescCache(env, getTableName());
+ setNextState(TruncateTableState.TRUNCATE_TABLE_ADD_TO_META);
+ break;
+ case TRUNCATE_TABLE_ADD_TO_META:
+ regions = CreateTableProcedure.addTableToMeta(env, hTableDescriptor, regions);
+ setNextState(TruncateTableState.TRUNCATE_TABLE_ASSIGN_REGIONS);
+ break;
+ case TRUNCATE_TABLE_ASSIGN_REGIONS:
+ CreateTableProcedure.assignRegions(env, getTableName(), regions);
+ setNextState(TruncateTableState.TRUNCATE_TABLE_POST_OPERATION);
+ hTableDescriptor = null;
+ regions = null;
+ break;
+ case TRUNCATE_TABLE_POST_OPERATION:
+ postTruncate(env);
+ LOG.debug("truncate '" + getTableName() + "' completed");
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (HBaseException|IOException e) {
+ LOG.warn("Retriable error trying to truncate table=" + getTableName() + " state=" + state, e);
+ } catch (InterruptedException e) {
+ // if the interrupt is real, the executor will be stopped.
+ LOG.warn("Interrupted trying to truncate table=" + getTableName() + " state=" + state, e);
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final TruncateTableState state) {
+ if (state == TruncateTableState.TRUNCATE_TABLE_PRE_OPERATION) {
+ // nothing to rollback, pre-truncate is just table-state checks.
+ // We can fail if the table does not exist or is not disabled.
+ return;
+ }
+
+ // The truncate has no rollback; the procedure retries forward until it eventually succeeds.
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+
+ @Override
+ protected TruncateTableState getState(final int stateId) {
+ return TruncateTableState.valueOf(stateId);
+ }
+
+ @Override
+ protected int getStateId(final TruncateTableState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected TruncateTableState getInitialState() {
+ return TruncateTableState.TRUNCATE_TABLE_PRE_OPERATION;
+ }
+
+ @Override
+ public TableName getTableName() {
+ return tableName;
+ }
+
+ @Override
+ public TableOperationType getTableOperationType() {
+ return TableOperationType.EDIT;
+ }
+
+ @Override
+ public boolean abort(final MasterProcedureEnv env) {
+ // TODO: We may be able to abort if the procedure is not started yet.
+ return false;
+ }
+
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ if (!env.isInitialized()) return false;
+ return env.getProcedureQueue().tryAcquireTableWrite(getTableName(), "truncate table");
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ env.getProcedureQueue().releaseTableWrite(getTableName());
+ }
+
+ @Override
+ public void toStringClassDetails(StringBuilder sb) {
+ sb.append(getClass().getSimpleName());
+ sb.append(" (table=");
+ sb.append(getTableName());
+ sb.append(" preserveSplits=");
+ sb.append(preserveSplits);
+ sb.append(") user=");
+ sb.append(user);
+ }
+
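+ // Persist the state as a TruncateTableStateData message: either the table name
+ // (before the schema has been captured) or the full table schema is written,
+ // plus any regions currently being recreated.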
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
+ MasterProcedureProtos.TruncateTableStateData.Builder state =
+ MasterProcedureProtos.TruncateTableStateData.newBuilder()
+ .setUserInfo(MasterProcedureUtil.toProtoUserInfo(this.user))
+ .setPreserveSplits(preserveSplits);
+ if (hTableDescriptor != null) {
+ state.setTableSchema(hTableDescriptor.convert());
+ } else {
+ state.setTableName(ProtobufUtil.toProtoTableName(tableName));
+ }
+ if (regions != null) {
+ for (HRegionInfo hri: regions) {
+ state.addRegionInfo(HRegionInfo.convert(hri));
+ }
+ }
+ state.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ MasterProcedureProtos.TruncateTableStateData state =
+ MasterProcedureProtos.TruncateTableStateData.parseDelimitedFrom(stream);
+ user = MasterProcedureUtil.toUserInfo(state.getUserInfo());
+ if (state.hasTableSchema()) {
+ hTableDescriptor = HTableDescriptor.convert(state.getTableSchema());
+ tableName = hTableDescriptor.getTableName();
+ } else {
+ tableName = ProtobufUtil.toTableName(state.getTableName());
+ }
+ preserveSplits = state.getPreserveSplits();
+ if (state.getRegionInfoCount() == 0) {
+ regions = null;
+ } else {
+ regions = new ArrayList<HRegionInfo>(state.getRegionInfoCount());
+ for (HBaseProtos.RegionInfo hri: state.getRegionInfoList()) {
+ regions.add(HRegionInfo.convert(hri));
+ }
+ }
+ }
+
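+ // checkTableModifiable throws if the table does not exist or is not disabled;
+ // in that case the procedure is marked as failed instead of being retried.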
+ private boolean prepareTruncate(final MasterProcedureEnv env) throws IOException {
+ try {
+ env.getMasterServices().checkTableModifiable(getTableName());
+ } catch (TableNotFoundException|TableNotDisabledException e) {
+ setFailure("master-truncate-table", e);
+ return false;
+ }
+ return true;
+ }
+
+ private boolean preTruncate(final MasterProcedureEnv env)
+ throws IOException, InterruptedException {
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ final TableName tableName = getTableName();
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ cpHost.preTruncateTableHandler(tableName);
+ return null;
+ }
+ });
+ }
+ return true;
+ }
+
+ private void postTruncate(final MasterProcedureEnv env)
+ throws IOException, InterruptedException {
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ final TableName tableName = getTableName();
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ cpHost.postTruncateTableHandler(tableName);
+ return null;
+ }
+ });
+ }
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index ff795697338..9cb0d57c397 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -2290,6 +2290,18 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
return count;
}
+ /**
+ * Return the number of rows in the given table.
+ */
+ public int countRows(final TableName tableName) throws IOException {
+ Table table = getConnection().getTable(tableName);
+ try {
+ return countRows(table);
+ } finally {
+ table.close();
+ }
+ }
+
/**
* Return an md5 digest of the entire contents of a table.
*/
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index 9bb436ec3b6..57a15e81a91 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -35,6 +35,10 @@ import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableDescriptor;
+import org.apache.hadoop.hbase.client.BufferedMutator;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.HMaster;
@@ -44,6 +48,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.MD5Hash;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -85,6 +90,7 @@ public class MasterProcedureTestingUtility {
final FileSystem fs = master.getMasterFileSystem().getFileSystem();
final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName);
assertTrue(fs.exists(tableDir));
+ FSUtils.logFileSystemState(fs, tableDir, LOG);
List<Path> allRegionDirs = FSUtils.getRegionDirs(fs, tableDir);
for (int i = 0; i < regions.length; ++i) {
Path regionDir = new Path(tableDir, regions[i].getEncodedName());
@@ -343,6 +349,43 @@ public class MasterProcedureTestingUtility {
assertTrue(hcfd.equals(columnDescriptor));
}
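+ /**
+ * Load the specified number of rows into the table, writing at least one row per
+ * split key so that every region gets data. Puts go through a BufferedMutator
+ * with SKIP_WAL durability to keep the test fast.
+ */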
+ public static void loadData(final Connection connection, final TableName tableName,
+ int rows, final byte[][] splitKeys, final String... sfamilies) throws IOException {
+ byte[][] families = new byte[sfamilies.length][];
+ for (int i = 0; i < families.length; ++i) {
+ families[i] = Bytes.toBytes(sfamilies[i]);
+ }
+
+ BufferedMutator mutator = connection.getBufferedMutator(tableName);
+
+ // Ensure one row per region
+ assertTrue(rows >= splitKeys.length);
+ for (byte[] k: splitKeys) {
+ byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), k);
+ byte[] key = Bytes.add(k, Bytes.toBytes(MD5Hash.getMD5AsHex(value)));
+ mutator.mutate(createPut(families, key, value));
+ rows--;
+ }
+
+ // Add the remaining rows; more rows means more files
+ while (rows-- > 0) {
+ byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), Bytes.toBytes(rows));
+ byte[] key = Bytes.toBytes(MD5Hash.getMD5AsHex(value));
+ mutator.mutate(createPut(families, key, value));
+ }
+ mutator.flush();
+ }
+
+ private static Put createPut(final byte[][] families, final byte[] key, final byte[] value) {
+ byte[] q = Bytes.toBytes("q");
+ Put put = new Put(key);
+ put.setDurability(Durability.SKIP_WAL);
+ for (byte[] family: families) {
+ put.add(family, q, value);
+ }
+ return put;
+ }
+
public static class InjectAbortOnLoadListener
implements ProcedureExecutor.ProcedureExecutorListener {
private final ProcedureExecutor<MasterProcedureEnv> procExec;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
index 0f6c91037c4..25763023500 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTa
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableState;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableState;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableState;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -214,6 +215,67 @@ public class TestMasterFailoverWithProcedures {
UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
}
+ // ==========================================================================
+ // Test Truncate Table
+ // ==========================================================================
+ @Test(timeout=90000)
+ public void testTruncateWithFailover() throws Exception {
+ // TODO: Should we try every step? (master failover takes a long time)
+ // Each step is already covered by TestTruncateTableProcedure, but there only
+ // the executor/store is restarted, not the master. Without a master restart
+ // we may not catch bugs in the procedure code, such as a missing "wait" for
+ // resources to be available (e.g. RS).
+ testTruncateWithFailoverAtStep(true, TruncateTableState.TRUNCATE_TABLE_ADD_TO_META.ordinal());
+ }
+
+ private void testTruncateWithFailoverAtStep(final boolean preserveSplits, final int step)
+ throws Exception {
+ final TableName tableName = TableName.valueOf("testTruncateWithFailoverAtStep" + step);
+
+ // create the table
+ final String[] families = new String[] { "f1", "f2" };
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ getMasterProcedureExecutor(), tableName, splitKeys, families);
+ // load and verify that there are rows in the table
+ MasterProcedureTestingUtility.loadData(
+ UTIL.getConnection(), tableName, 100, splitKeys, families);
+ assertEquals(100, UTIL.countRows(tableName));
+ // disable the table
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Truncate procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new TruncateTableProcedure(procExec.getEnvironment(), tableName, preserveSplits));
+ testRecoveryAndDoubleExecution(UTIL, procId, step, TruncateTableState.values());
+
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
+ UTIL.waitUntilAllRegionsAssigned(tableName);
+
+ // validate the table regions and layout
+ if (preserveSplits) {
+ assertEquals(1 + splitKeys.length, UTIL.getHBaseAdmin().getTableRegions(tableName).size());
+ } else {
+ regions = UTIL.getHBaseAdmin().getTableRegions(tableName).toArray(new HRegionInfo[1]);
+ assertEquals(1, regions.length);
+ }
+ MasterProcedureTestingUtility.validateTableCreation(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, families);
+
+ // verify that there are no rows in the table
+ assertEquals(0, UTIL.countRows(tableName));
+
+ // verify that the table is read/writable
+ MasterProcedureTestingUtility.loadData(
+ UTIL.getConnection(), tableName, 50, splitKeys, families);
+ assertEquals(50, UTIL.countRows(tableName));
+ }
+
// ==========================================================================
// Test Disable Table
// ==========================================================================
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
new file mode 100644
index 00000000000..58acbaedd41
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
@@ -0,0 +1,246 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureResult;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestTruncateTableProcedure {
+ private static final Log LOG = LogFactory.getLog(TestTruncateTableProcedure.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static void setupConf(Configuration conf) {
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ }
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
+ assertTrue("expected executor to be running", procExec.isRunning());
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
+ LOG.info("Tear down, remove table=" + htd.getTableName());
+ UTIL.deleteTable(htd.getTableName());
+ }
+ }
+
+ @Test(timeout=60000)
+ public void testTruncateNotExistentTable() throws Exception {
+ final TableName tableName = TableName.valueOf("testTruncateNotExistentTable");
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ long procId = ProcedureTestingUtility.submitAndWait(procExec,
+ new TruncateTableProcedure(procExec.getEnvironment(), tableName, true));
+
+ // Truncate of a non-existent table should fail with TableNotFoundException
+ ProcedureResult result = procExec.getResult(procId);
+ assertTrue(result.isFailed());
+ LOG.debug("Truncate failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof TableNotFoundException);
+ }
+
+ @Test(timeout=60000)
+ public void testTruncateNotDisabledTable() throws Exception {
+ final TableName tableName = TableName.valueOf("testTruncateNotDisabledTable");
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f");
+
+ long procId = ProcedureTestingUtility.submitAndWait(procExec,
+ new TruncateTableProcedure(procExec.getEnvironment(), tableName, false));
+
+ // Truncate of an enabled table should fail with TableNotDisabledException
+ ProcedureResult result = procExec.getResult(procId);
+ assertTrue(result.isFailed());
+ LOG.debug("Truncate failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof TableNotDisabledException);
+ }
+
+ @Test(timeout=60000)
+ public void testSimpleTruncatePreserveSplits() throws Exception {
+ final TableName tableName = TableName.valueOf("testSimpleTruncatePreserveSplits");
+ testSimpleTruncate(tableName, true);
+ }
+
+ @Test(timeout=60000)
+ public void testSimpleTruncateNoPreserveSplits() throws Exception {
+ final TableName tableName = TableName.valueOf("testSimpleTruncateNoPreserveSplits");
+ testSimpleTruncate(tableName, false);
+ }
+
+ private void testSimpleTruncate(final TableName tableName, final boolean preserveSplits)
+ throws Exception {
+ final String[] families = new String[] { "f1", "f2" };
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ getMasterProcedureExecutor(), tableName, splitKeys, families);
+ // load and verify that there are rows in the table
+ MasterProcedureTestingUtility.loadData(
+ UTIL.getConnection(), tableName, 100, splitKeys, families);
+ assertEquals(100, UTIL.countRows(tableName));
+ // disable the table
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ // truncate the table
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ long procId = ProcedureTestingUtility.submitAndWait(procExec,
+ new TruncateTableProcedure(procExec.getEnvironment(), tableName, preserveSplits));
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+
+ UTIL.waitUntilAllRegionsAssigned(tableName);
+
+ // validate the table regions and layout
+ if (preserveSplits) {
+ assertEquals(1 + splitKeys.length, UTIL.getHBaseAdmin().getTableRegions(tableName).size());
+ } else {
+ regions = UTIL.getHBaseAdmin().getTableRegions(tableName).toArray(new HRegionInfo[1]);
+ assertEquals(1, regions.length);
+ }
+ MasterProcedureTestingUtility.validateTableCreation(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, families);
+
+ // verify that there are no rows in the table
+ assertEquals(0, UTIL.countRows(tableName));
+
+ // verify that the table is read/writable
+ MasterProcedureTestingUtility.loadData(
+ UTIL.getConnection(), tableName, 50, splitKeys, families);
+ assertEquals(50, UTIL.countRows(tableName));
+ }
+
+ @Test(timeout=60000)
+ public void testRecoveryAndDoubleExecutionPreserveSplits() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionPreserveSplits");
+ testRecoveryAndDoubleExecution(tableName, true);
+ }
+
+ @Test(timeout=60000)
+ public void testRecoveryAndDoubleExecutionNoPreserveSplits() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionNoPreserveSplits");
+ testRecoveryAndDoubleExecution(tableName, false);
+ }
+
+ private void testRecoveryAndDoubleExecution(final TableName tableName,
+ final boolean preserveSplits) throws Exception {
+ final String[] families = new String[] { "f1", "f2" };
+
+ // create the table
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ getMasterProcedureExecutor(), tableName, splitKeys, families);
+ // load and verify that there are rows in the table
+ MasterProcedureTestingUtility.loadData(
+ UTIL.getConnection(), tableName, 100, splitKeys, families);
+ assertEquals(100, UTIL.countRows(tableName));
+ // disable the table
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Truncate procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new TruncateTableProcedure(procExec.getEnvironment(), tableName, preserveSplits));
+
+ // Restart the executor and execute the step twice
+ // NOTE: the number of TruncateTableState steps (7) is hardcoded,
+ // so revisit this test whenever a new step is added.
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
+ procExec, procId, 7, TruncateTableState.values());
+
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
+ UTIL.waitUntilAllRegionsAssigned(tableName);
+
+ // validate the table regions and layout
+ if (preserveSplits) {
+ assertEquals(1 + splitKeys.length, UTIL.getHBaseAdmin().getTableRegions(tableName).size());
+ } else {
+ regions = UTIL.getHBaseAdmin().getTableRegions(tableName).toArray(new HRegionInfo[1]);
+ assertEquals(1, regions.length);
+ }
+ MasterProcedureTestingUtility.validateTableCreation(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, families);
+
+ // verify that there are no rows in the table
+ assertEquals(0, UTIL.countRows(tableName));
+
+ // verify that the table is read/writable
+ MasterProcedureTestingUtility.loadData(
+ UTIL.getConnection(), tableName, 50, splitKeys, families);
+ assertEquals(50, UTIL.countRows(tableName));
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+}