HBASE-15467 Remove 1.x/2.0 TableDescriptor incompatibility

Enis Soztutar 2016-06-17 17:16:57 -07:00
parent 65a8d77433
commit bdb0cc8808
27 changed files with 238 additions and 1222 deletions
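The net effect for callers, as the hunks below show, is that the intermediate TableDescriptor wrapper (and its hbase.pb.TableDescriptor proto message) goes away and code works with HTableDescriptor directly. A minimal sketch of the caller-side change, not part of the commit itself; the class and method names here are illustrative only, and the descriptor source is any TableDescriptors instance such as the one returned by MasterServices#getTableDescriptors():

import java.io.IOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;

class NormalizationCheck {
  // Before: tds.getDescriptor(table).getHTableDescriptor().isNormalizationEnabled()
  // After:  tds.get(table).isNormalizationEnabled()
  static boolean shouldNormalize(TableDescriptors tds, TableName table) throws IOException {
    HTableDescriptor htd = tds.get(table);  // the descriptor itself, no wrapper to unwrap
    return htd != null && htd.isNormalizationEnabled();
  }
}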

View File

@@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
@@ -1422,49 +1421,6 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
}
/** Table descriptor for <code>hbase:meta</code> catalog table
* Deprecated, use TableDescriptors#get(TableName.META_TABLE) or
* Admin#getTableDescriptor(TableName.META_TABLE) instead.
*/
@Deprecated
public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
TableName.META_TABLE_NAME,
new HColumnDescriptor[] {
new HColumnDescriptor(HConstants.CATALOG_FAMILY)
// Ten is arbitrary number. Keep versions to help debugging.
.setMaxVersions(10)
.setInMemory(true)
.setBlocksize(8 * 1024)
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
.setBloomFilterType(BloomType.NONE)
// Enable cache of data blocks in L1 if more than one caching tier deployed:
// e.g. if using CombinedBlockCache (BucketCache).
.setCacheDataInL1(true),
new HColumnDescriptor(HConstants.TABLE_FAMILY)
// Ten is arbitrary number. Keep versions to help debugging.
.setMaxVersions(10)
.setInMemory(true)
.setBlocksize(8 * 1024)
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
.setBloomFilterType(BloomType.NONE)
// Enable cache of data blocks in L1 if more than one caching tier deployed:
// e.g. if using CombinedBlockCache (BucketCache).
.setCacheDataInL1(true)
});
static {
try {
META_TABLEDESC.addCoprocessor(
"org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
null, Coprocessor.PRIORITY_SYSTEM, null);
} catch (IOException ex) {
//LOG.warn("exception in loading coprocessor for the hbase:meta table");
throw new RuntimeException(ex);
}
}
public final static String NAMESPACE_FAMILY_INFO = "info";
public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");

View File

@@ -3126,575 +3126,6 @@ public final class HBaseProtos {
// @@protoc_insertion_point(class_scope:hbase.pb.TableState)
}
public interface TableDescriptorOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hbase.pb.TableSchema schema = 1;
/**
* <code>required .hbase.pb.TableSchema schema = 1;</code>
*/
boolean hasSchema();
/**
* <code>required .hbase.pb.TableSchema schema = 1;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema();
/**
* <code>required .hbase.pb.TableSchema schema = 1;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder();
}
/**
* Protobuf type {@code hbase.pb.TableDescriptor}
*
* <pre>
** On HDFS representation of table state.
* </pre>
*/
public static final class TableDescriptor extends
com.google.protobuf.GeneratedMessage
implements TableDescriptorOrBuilder {
// Use TableDescriptor.newBuilder() to construct.
private TableDescriptor(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private TableDescriptor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final TableDescriptor defaultInstance;
public static TableDescriptor getDefaultInstance() {
return defaultInstance;
}
public TableDescriptor getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private TableDescriptor(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = schema_.toBuilder();
}
schema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(schema_);
schema_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.Builder.class);
}
public static com.google.protobuf.Parser<TableDescriptor> PARSER =
new com.google.protobuf.AbstractParser<TableDescriptor>() {
public TableDescriptor parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new TableDescriptor(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<TableDescriptor> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hbase.pb.TableSchema schema = 1;
public static final int SCHEMA_FIELD_NUMBER = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema schema_;
/**
* <code>required .hbase.pb.TableSchema schema = 1;</code>
*/
public boolean hasSchema() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required .hbase.pb.TableSchema schema = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema() {
return schema_;
}
/**
* <code>required .hbase.pb.TableSchema schema = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder() {
return schema_;
}
private void initFields() {
schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasSchema()) {
memoizedIsInitialized = 0;
return false;
}
if (!getSchema().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, schema_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, schema_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) obj;
boolean result = true;
result = result && (hasSchema() == other.hasSchema());
if (hasSchema()) {
result = result && getSchema()
.equals(other.getSchema());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasSchema()) {
hash = (37 * hash) + SCHEMA_FIELD_NUMBER;
hash = (53 * hash) + getSchema().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hbase.pb.TableDescriptor}
*
* <pre>
** On HDFS representation of table state.
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptorOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getSchemaFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (schemaBuilder_ == null) {
schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
} else {
schemaBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_TableDescriptor_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor build() {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (schemaBuilder_ == null) {
result.schema_ = schema_;
} else {
result.schema_ = schemaBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.getDefaultInstance()) return this;
if (other.hasSchema()) {
mergeSchema(other.getSchema());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasSchema()) {
return false;
}
if (!getSchema().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hbase.pb.TableSchema schema = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> schemaBuilder_;
/**
* <code>required .hbase.pb.TableSchema schema = 1;</code>
*/
public boolean hasSchema() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required .hbase.pb.TableSchema schema = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema() {
if (schemaBuilder_ == null) {
return schema_;
} else {
return schemaBuilder_.getMessage();
}
}
/**
* <code>required .hbase.pb.TableSchema schema = 1;</code>
*/
public Builder setSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
if (schemaBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
schema_ = value;
onChanged();
} else {
schemaBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>required .hbase.pb.TableSchema schema = 1;</code>
*/
public Builder setSchema(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) {
if (schemaBuilder_ == null) {
schema_ = builderForValue.build();
onChanged();
} else {
schemaBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>required .hbase.pb.TableSchema schema = 1;</code>
*/
public Builder mergeSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
if (schemaBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
schema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) {
schema_ =
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(schema_).mergeFrom(value).buildPartial();
} else {
schema_ = value;
}
onChanged();
} else {
schemaBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>required .hbase.pb.TableSchema schema = 1;</code>
*/
public Builder clearSchema() {
if (schemaBuilder_ == null) {
schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
onChanged();
} else {
schemaBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* <code>required .hbase.pb.TableSchema schema = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getSchemaBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getSchemaFieldBuilder().getBuilder();
}
/**
* <code>required .hbase.pb.TableSchema schema = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder() {
if (schemaBuilder_ != null) {
return schemaBuilder_.getMessageOrBuilder();
} else {
return schema_;
}
}
/**
* <code>required .hbase.pb.TableSchema schema = 1;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>
getSchemaFieldBuilder() {
if (schemaBuilder_ == null) {
schemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
schema_,
getParentForChildren(),
isClean());
schema_ = null;
}
return schemaBuilder_;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.TableDescriptor)
}
static {
defaultInstance = new TableDescriptor(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.TableDescriptor)
}
public interface ColumnFamilySchemaOrBuilder
extends com.google.protobuf.MessageOrBuilder {
@@ -20112,11 +19543,6 @@ public final class HBaseProtos {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_TableState_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_TableDescriptor_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_TableDescriptor_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_ColumnFamilySchema_descriptor;
private static
@@ -20246,60 +19672,59 @@ public final class HBaseProtos {
"ameStringPair\"x\n\nTableState\022)\n\005state\030\001 \002" +
"(\0162\032.hbase.pb.TableState.State\"?\n\005State\022" +
"\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\022\r\n\tDISABLING\020",
"\002\022\014\n\010ENABLING\020\003\"8\n\017TableDescriptor\022%\n\006sc" +
"hema\030\001 \002(\0132\025.hbase.pb.TableSchema\"\201\001\n\022Co" +
"lumnFamilySchema\022\014\n\004name\030\001 \002(\014\022,\n\nattrib" +
"utes\030\002 \003(\0132\030.hbase.pb.BytesBytesPair\022/\n\r" +
"configuration\030\003 \003(\0132\030.hbase.pb.NameStrin" +
"gPair\"\243\001\n\nRegionInfo\022\021\n\tregion_id\030\001 \002(\004\022" +
"\'\n\ntable_name\030\002 \002(\0132\023.hbase.pb.TableName" +
"\022\021\n\tstart_key\030\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022\017\n\007" +
"offline\030\005 \001(\010\022\r\n\005split\030\006 \001(\010\022\025\n\nreplica_" +
"id\030\007 \001(\005:\0010\":\n\014FavoredNodes\022*\n\014favored_n",
"ode\030\001 \003(\0132\024.hbase.pb.ServerName\"\236\001\n\017Regi" +
"onSpecifier\022;\n\004type\030\001 \002(\0162-.hbase.pb.Reg" +
"ionSpecifier.RegionSpecifierType\022\r\n\005valu" +
"e\030\002 \002(\014\"?\n\023RegionSpecifierType\022\017\n\013REGION" +
"_NAME\020\001\022\027\n\023ENCODED_REGION_NAME\020\002\"%\n\tTime" +
"Range\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"W\n\025Colum" +
"nFamilyTimeRange\022\025\n\rcolumn_family\030\001 \002(\014\022" +
"\'\n\ntime_range\030\002 \002(\0132\023.hbase.pb.TimeRange" +
"\"A\n\nServerName\022\021\n\thost_name\030\001 \002(\t\022\014\n\004por" +
"t\030\002 \001(\r\022\022\n\nstart_code\030\003 \001(\004\"\033\n\013Coprocess",
"or\022\014\n\004name\030\001 \002(\t\"-\n\016NameStringPair\022\014\n\004na" +
"me\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNameBytesPair" +
"\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014\"/\n\016BytesBy" +
"tesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006second\030\002 \002(\014\"," +
"\n\rNameInt64Pair\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030\002" +
" \001(\003\"\325\001\n\023SnapshotDescription\022\014\n\004name\030\001 \002" +
"(\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation_time\030\003 \001(\003" +
":\0010\0227\n\004type\030\004 \001(\0162\".hbase.pb.SnapshotDes" +
"cription.Type:\005FLUSH\022\017\n\007version\030\005 \001(\005\022\r\n" +
"\005owner\030\006 \001(\t\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FL",
"USH\020\001\022\r\n\tSKIPFLUSH\020\002\"\206\001\n\024ProcedureDescri" +
"ption\022\021\n\tsignature\030\001 \002(\t\022\020\n\010instance\030\002 \001" +
"(\t\022\030\n\rcreation_time\030\003 \001(\003:\0010\022/\n\rconfigur" +
"ation\030\004 \003(\0132\030.hbase.pb.NameStringPair\"\n\n" +
"\010EmptyMsg\"\033\n\007LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\037" +
"\n\tDoubleMsg\022\022\n\ndouble_msg\030\001 \002(\001\"\'\n\rBigDe" +
"cimalMsg\022\026\n\016bigdecimal_msg\030\001 \002(\014\"5\n\004UUID" +
"\022\026\n\016least_sig_bits\030\001 \002(\004\022\025\n\rmost_sig_bit" +
"s\030\002 \002(\004\"T\n\023NamespaceDescriptor\022\014\n\004name\030\001" +
" \002(\014\022/\n\rconfiguration\030\002 \003(\0132\030.hbase.pb.N",
"ameStringPair\"\235\001\n\013VersionInfo\022\017\n\007version" +
"\030\001 \002(\t\022\013\n\003url\030\002 \002(\t\022\020\n\010revision\030\003 \002(\t\022\014\n" +
"\004user\030\004 \002(\t\022\014\n\004date\030\005 \002(\t\022\024\n\014src_checksu" +
"m\030\006 \002(\t\022\025\n\rversion_major\030\007 \001(\r\022\025\n\rversio" +
"n_minor\030\010 \001(\r\"Q\n\020RegionServerInfo\022\020\n\010inf" +
"oPort\030\001 \001(\005\022+\n\014version_info\030\002 \001(\0132\025.hbas" +
"e.pb.VersionInfo*r\n\013CompareType\022\010\n\004LESS\020" +
"\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_E" +
"QUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER\020" +
"\005\022\t\n\005NO_OP\020\006*n\n\010TimeUnit\022\017\n\013NANOSECONDS\020",
"\001\022\020\n\014MICROSECONDS\020\002\022\020\n\014MILLISECONDS\020\003\022\013\n" +
"\007SECONDS\020\004\022\013\n\007MINUTES\020\005\022\t\n\005HOURS\020\006\022\010\n\004DA" +
"YS\020\007B>\n*org.apache.hadoop.hbase.protobuf" +
".generatedB\013HBaseProtosH\001\240\001\001"
"\002\022\014\n\010ENABLING\020\003\"\201\001\n\022ColumnFamilySchema\022\014" +
"\n\004name\030\001 \002(\014\022,\n\nattributes\030\002 \003(\0132\030.hbase" +
".pb.BytesBytesPair\022/\n\rconfiguration\030\003 \003(" +
"\0132\030.hbase.pb.NameStringPair\"\243\001\n\nRegionIn" +
"fo\022\021\n\tregion_id\030\001 \002(\004\022\'\n\ntable_name\030\002 \002(" +
"\0132\023.hbase.pb.TableName\022\021\n\tstart_key\030\003 \001(" +
"\014\022\017\n\007end_key\030\004 \001(\014\022\017\n\007offline\030\005 \001(\010\022\r\n\005s" +
"plit\030\006 \001(\010\022\025\n\nreplica_id\030\007 \001(\005:\0010\":\n\014Fav" +
"oredNodes\022*\n\014favored_node\030\001 \003(\0132\024.hbase." +
"pb.ServerName\"\236\001\n\017RegionSpecifier\022;\n\004typ",
"e\030\001 \002(\0162-.hbase.pb.RegionSpecifier.Regio" +
"nSpecifierType\022\r\n\005value\030\002 \002(\014\"?\n\023RegionS" +
"pecifierType\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCODED" +
"_REGION_NAME\020\002\"%\n\tTimeRange\022\014\n\004from\030\001 \001(" +
"\004\022\n\n\002to\030\002 \001(\004\"W\n\025ColumnFamilyTimeRange\022\025" +
"\n\rcolumn_family\030\001 \002(\014\022\'\n\ntime_range\030\002 \002(" +
"\0132\023.hbase.pb.TimeRange\"A\n\nServerName\022\021\n\t" +
"host_name\030\001 \002(\t\022\014\n\004port\030\002 \001(\r\022\022\n\nstart_c" +
"ode\030\003 \001(\004\"\033\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-" +
"\n\016NameStringPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030",
"\002 \002(\t\",\n\rNameBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005" +
"value\030\002 \001(\014\"/\n\016BytesBytesPair\022\r\n\005first\030\001" +
" \002(\014\022\016\n\006second\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n" +
"\004name\030\001 \001(\t\022\r\n\005value\030\002 \001(\003\"\325\001\n\023SnapshotD" +
"escription\022\014\n\004name\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022" +
"\030\n\rcreation_time\030\003 \001(\003:\0010\0227\n\004type\030\004 \001(\0162" +
"\".hbase.pb.SnapshotDescription.Type:\005FLU" +
"SH\022\017\n\007version\030\005 \001(\005\022\r\n\005owner\030\006 \001(\t\".\n\004Ty" +
"pe\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH\020\001\022\r\n\tSKIPFLUSH" +
"\020\002\"\206\001\n\024ProcedureDescription\022\021\n\tsignature",
"\030\001 \002(\t\022\020\n\010instance\030\002 \001(\t\022\030\n\rcreation_tim" +
"e\030\003 \001(\003:\0010\022/\n\rconfiguration\030\004 \003(\0132\030.hbas" +
"e.pb.NameStringPair\"\n\n\010EmptyMsg\"\033\n\007LongM" +
"sg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDoubleMsg\022\022\n\ndou" +
"ble_msg\030\001 \002(\001\"\'\n\rBigDecimalMsg\022\026\n\016bigdec" +
"imal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016least_sig_bits" +
"\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"T\n\023Namespa" +
"ceDescriptor\022\014\n\004name\030\001 \002(\014\022/\n\rconfigurat" +
"ion\030\002 \003(\0132\030.hbase.pb.NameStringPair\"\235\001\n\013" +
"VersionInfo\022\017\n\007version\030\001 \002(\t\022\013\n\003url\030\002 \002(",
"\t\022\020\n\010revision\030\003 \002(\t\022\014\n\004user\030\004 \002(\t\022\014\n\004dat" +
"e\030\005 \002(\t\022\024\n\014src_checksum\030\006 \002(\t\022\025\n\rversion" +
"_major\030\007 \001(\r\022\025\n\rversion_minor\030\010 \001(\r\"Q\n\020R" +
"egionServerInfo\022\020\n\010infoPort\030\001 \001(\005\022+\n\014ver" +
"sion_info\030\002 \001(\0132\025.hbase.pb.VersionInfo*r" +
"\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL" +
"\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_" +
"OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*n\n\010Ti" +
"meUnit\022\017\n\013NANOSECONDS\020\001\022\020\n\014MICROSECONDS\020" +
"\002\022\020\n\014MILLISECONDS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007MINU",
"TES\020\005\022\t\n\005HOURS\020\006\022\010\n\004DAYS\020\007B>\n*org.apache" +
".hadoop.hbase.protobuf.generatedB\013HBaseP" +
"rotosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -20324,140 +19749,134 @@ public final class HBaseProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_TableState_descriptor,
new java.lang.String[] { "State", });
internal_static_hbase_pb_TableDescriptor_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_hbase_pb_TableDescriptor_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_TableDescriptor_descriptor,
new java.lang.String[] { "Schema", });
internal_static_hbase_pb_ColumnFamilySchema_descriptor =
getDescriptor().getMessageTypes().get(4);
getDescriptor().getMessageTypes().get(3);
internal_static_hbase_pb_ColumnFamilySchema_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ColumnFamilySchema_descriptor,
new java.lang.String[] { "Name", "Attributes", "Configuration", });
internal_static_hbase_pb_RegionInfo_descriptor =
getDescriptor().getMessageTypes().get(5);
getDescriptor().getMessageTypes().get(4);
internal_static_hbase_pb_RegionInfo_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_RegionInfo_descriptor,
new java.lang.String[] { "RegionId", "TableName", "StartKey", "EndKey", "Offline", "Split", "ReplicaId", });
internal_static_hbase_pb_FavoredNodes_descriptor =
getDescriptor().getMessageTypes().get(6);
getDescriptor().getMessageTypes().get(5);
internal_static_hbase_pb_FavoredNodes_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_FavoredNodes_descriptor,
new java.lang.String[] { "FavoredNode", });
internal_static_hbase_pb_RegionSpecifier_descriptor =
getDescriptor().getMessageTypes().get(7);
getDescriptor().getMessageTypes().get(6);
internal_static_hbase_pb_RegionSpecifier_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_RegionSpecifier_descriptor,
new java.lang.String[] { "Type", "Value", });
internal_static_hbase_pb_TimeRange_descriptor =
getDescriptor().getMessageTypes().get(8);
getDescriptor().getMessageTypes().get(7);
internal_static_hbase_pb_TimeRange_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_TimeRange_descriptor,
new java.lang.String[] { "From", "To", });
internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor =
getDescriptor().getMessageTypes().get(9);
getDescriptor().getMessageTypes().get(8);
internal_static_hbase_pb_ColumnFamilyTimeRange_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor,
new java.lang.String[] { "ColumnFamily", "TimeRange", });
internal_static_hbase_pb_ServerName_descriptor =
getDescriptor().getMessageTypes().get(10);
getDescriptor().getMessageTypes().get(9);
internal_static_hbase_pb_ServerName_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ServerName_descriptor,
new java.lang.String[] { "HostName", "Port", "StartCode", });
internal_static_hbase_pb_Coprocessor_descriptor =
getDescriptor().getMessageTypes().get(11);
getDescriptor().getMessageTypes().get(10);
internal_static_hbase_pb_Coprocessor_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_Coprocessor_descriptor,
new java.lang.String[] { "Name", });
internal_static_hbase_pb_NameStringPair_descriptor =
getDescriptor().getMessageTypes().get(12);
getDescriptor().getMessageTypes().get(11);
internal_static_hbase_pb_NameStringPair_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_NameStringPair_descriptor,
new java.lang.String[] { "Name", "Value", });
internal_static_hbase_pb_NameBytesPair_descriptor =
getDescriptor().getMessageTypes().get(13);
getDescriptor().getMessageTypes().get(12);
internal_static_hbase_pb_NameBytesPair_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_NameBytesPair_descriptor,
new java.lang.String[] { "Name", "Value", });
internal_static_hbase_pb_BytesBytesPair_descriptor =
getDescriptor().getMessageTypes().get(14);
getDescriptor().getMessageTypes().get(13);
internal_static_hbase_pb_BytesBytesPair_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_BytesBytesPair_descriptor,
new java.lang.String[] { "First", "Second", });
internal_static_hbase_pb_NameInt64Pair_descriptor =
getDescriptor().getMessageTypes().get(15);
getDescriptor().getMessageTypes().get(14);
internal_static_hbase_pb_NameInt64Pair_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_NameInt64Pair_descriptor,
new java.lang.String[] { "Name", "Value", });
internal_static_hbase_pb_SnapshotDescription_descriptor =
getDescriptor().getMessageTypes().get(16);
getDescriptor().getMessageTypes().get(15);
internal_static_hbase_pb_SnapshotDescription_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_SnapshotDescription_descriptor,
new java.lang.String[] { "Name", "Table", "CreationTime", "Type", "Version", "Owner", });
internal_static_hbase_pb_ProcedureDescription_descriptor =
getDescriptor().getMessageTypes().get(17);
getDescriptor().getMessageTypes().get(16);
internal_static_hbase_pb_ProcedureDescription_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ProcedureDescription_descriptor,
new java.lang.String[] { "Signature", "Instance", "CreationTime", "Configuration", });
internal_static_hbase_pb_EmptyMsg_descriptor =
getDescriptor().getMessageTypes().get(18);
getDescriptor().getMessageTypes().get(17);
internal_static_hbase_pb_EmptyMsg_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_EmptyMsg_descriptor,
new java.lang.String[] { });
internal_static_hbase_pb_LongMsg_descriptor =
getDescriptor().getMessageTypes().get(19);
getDescriptor().getMessageTypes().get(18);
internal_static_hbase_pb_LongMsg_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_LongMsg_descriptor,
new java.lang.String[] { "LongMsg", });
internal_static_hbase_pb_DoubleMsg_descriptor =
getDescriptor().getMessageTypes().get(20);
getDescriptor().getMessageTypes().get(19);
internal_static_hbase_pb_DoubleMsg_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_DoubleMsg_descriptor,
new java.lang.String[] { "DoubleMsg", });
internal_static_hbase_pb_BigDecimalMsg_descriptor =
getDescriptor().getMessageTypes().get(21);
getDescriptor().getMessageTypes().get(20);
internal_static_hbase_pb_BigDecimalMsg_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_BigDecimalMsg_descriptor,
new java.lang.String[] { "BigdecimalMsg", });
internal_static_hbase_pb_UUID_descriptor =
getDescriptor().getMessageTypes().get(22);
getDescriptor().getMessageTypes().get(21);
internal_static_hbase_pb_UUID_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_UUID_descriptor,
new java.lang.String[] { "LeastSigBits", "MostSigBits", });
internal_static_hbase_pb_NamespaceDescriptor_descriptor =
getDescriptor().getMessageTypes().get(23);
getDescriptor().getMessageTypes().get(22);
internal_static_hbase_pb_NamespaceDescriptor_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_NamespaceDescriptor_descriptor,
new java.lang.String[] { "Name", "Configuration", });
internal_static_hbase_pb_VersionInfo_descriptor =
getDescriptor().getMessageTypes().get(24);
getDescriptor().getMessageTypes().get(23);
internal_static_hbase_pb_VersionInfo_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_VersionInfo_descriptor,
new java.lang.String[] { "Version", "Url", "Revision", "User", "Date", "SrcChecksum", "VersionMajor", "VersionMinor", });
internal_static_hbase_pb_RegionServerInfo_descriptor =
getDescriptor().getMessageTypes().get(25);
getDescriptor().getMessageTypes().get(24);
internal_static_hbase_pb_RegionServerInfo_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_RegionServerInfo_descriptor,

View File

@@ -58,11 +58,6 @@ message TableState {
required State state = 1;
}
/** On HDFS representation of table state. */
message TableDescriptor {
required TableSchema schema = 1;
}
/**
* Column Family Schema
* Inspired by the rest ColumSchemaMessage

View File

@@ -1,165 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.io.IOException;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.regionserver.BloomType;
/**
* Class represents table state on HDFS.
*/
@InterfaceAudience.Private
public class TableDescriptor {
private HTableDescriptor hTableDescriptor;
/**
* Creates TableDescriptor with Enabled table.
* @param hTableDescriptor HTableDescriptor to use
*/
@VisibleForTesting
public TableDescriptor(HTableDescriptor hTableDescriptor) {
this.hTableDescriptor = hTableDescriptor;
}
/**
* Associated HTableDescriptor
* @return instance of HTableDescriptor
*/
public HTableDescriptor getHTableDescriptor() {
return hTableDescriptor;
}
public void setHTableDescriptor(HTableDescriptor hTableDescriptor) {
this.hTableDescriptor = hTableDescriptor;
}
/**
* Convert to PB.
*/
@SuppressWarnings("deprecation")
public HBaseProtos.TableDescriptor convert() {
HBaseProtos.TableDescriptor.Builder builder = HBaseProtos.TableDescriptor.newBuilder()
.setSchema(ProtobufUtil.convertToTableSchema(hTableDescriptor));
return builder.build();
}
/**
* Convert from PB
*/
public static TableDescriptor convert(HBaseProtos.TableDescriptor proto) {
return new TableDescriptor(ProtobufUtil.convertToHTableDesc(proto.getSchema()));
}
/**
* @return This instance serialized with pb with pb magic prefix
* @see #parseFrom(byte[])
*/
public byte [] toByteArray() {
return ProtobufUtil.prependPBMagic(convert().toByteArray());
}
/**
* @param bytes A pb serialized {@link TableDescriptor} instance with pb magic prefix
* @see #toByteArray()
*/
public static TableDescriptor parseFrom(final byte [] bytes)
throws DeserializationException, IOException {
if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
throw new DeserializationException("Expected PB encoded TableDescriptor");
}
int pblen = ProtobufUtil.lengthOfPBMagic();
HBaseProtos.TableDescriptor.Builder builder = HBaseProtos.TableDescriptor.newBuilder();
HBaseProtos.TableDescriptor ts;
try {
ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
ts = builder.build();
} catch (IOException e) {
throw new DeserializationException(e);
}
return convert(ts);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TableDescriptor that = (TableDescriptor) o;
if (hTableDescriptor != null ?
!hTableDescriptor.equals(that.hTableDescriptor) :
that.hTableDescriptor != null) return false;
return true;
}
@Override
public int hashCode() {
return hTableDescriptor != null ? hTableDescriptor.hashCode() : 0;
}
@Override
public String toString() {
return "TableDescriptor{" +
"hTableDescriptor=" + hTableDescriptor +
'}';
}
public static HTableDescriptor metaTableDescriptor(final Configuration conf)
throws IOException {
HTableDescriptor metaDescriptor = new HTableDescriptor(
TableName.META_TABLE_NAME,
new HColumnDescriptor[] {
new HColumnDescriptor(HConstants.CATALOG_FAMILY)
.setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
HConstants.DEFAULT_HBASE_META_VERSIONS))
.setInMemory(true)
.setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
.setBloomFilterType(BloomType.NONE)
// Enable cache of data blocks in L1 if more than one caching tier deployed:
// e.g. if using CombinedBlockCache (BucketCache).
.setCacheDataInL1(true),
new HColumnDescriptor(HConstants.TABLE_FAMILY)
// Ten is arbitrary number. Keep versions to help debugging.
.setMaxVersions(10)
.setInMemory(true)
.setBlocksize(8 * 1024)
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
.setBloomFilterType(BloomType.NONE)
// Enable cache of data blocks in L1 if more than one caching tier deployed:
// e.g. if using CombinedBlockCache (BucketCache).
.setCacheDataInL1(true)
}) {
};
metaDescriptor.addCoprocessor(
"org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
null, Coprocessor.PRIORITY_SYSTEM, null);
return metaDescriptor;
}
}
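With the wrapper class above deleted, its convert()/parseFrom() layer disappears as well; the pb-magic-prefixed serialization lives on HTableDescriptor itself. A hedged round-trip sketch, assuming the HTableDescriptor#toByteArray and HTableDescriptor#parseFrom methods that already exist outside this commit (they are not shown in this diff):

import java.io.IOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.exceptions.DeserializationException;

class DescriptorRoundTrip {
  static HTableDescriptor roundTrip(HTableDescriptor htd)
      throws DeserializationException, IOException {
    byte[] bytes = htd.toByteArray();          // pb magic prefix + TableSchema bytes
    return HTableDescriptor.parseFrom(bytes);  // no intermediate TableDescriptor message
  }
}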

View File

@@ -28,20 +28,12 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
*/
@InterfaceAudience.Private
public interface TableDescriptors {
/**
* @param tableName
* @return HTableDescriptor for tablename
* @throws IOException
*/
HTableDescriptor get(final TableName tableName)
throws IOException;
/**
* @param tableName
* @return TableDescriptor for tablename
* @throws IOException
*/
TableDescriptor getDescriptor(final TableName tableName)
HTableDescriptor get(final TableName tableName)
throws IOException;
/**
@@ -67,7 +59,7 @@ public interface TableDescriptors {
* @return Map of all descriptors.
* @throws IOException
*/
Map<String, TableDescriptor> getAllDescriptors()
Map<String, HTableDescriptor> getAllDescriptors()
throws IOException;
/**
@@ -78,14 +70,6 @@ public interface TableDescriptors {
void add(final HTableDescriptor htd)
throws IOException;
/**
* Add or update descriptor
* @param htd Descriptor to set into TableDescriptors
* @throws IOException
*/
void add(final TableDescriptor htd)
throws IOException;
/**
* @param tablename
* @return Instance of table descriptor or null if none found.

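After this trimming the interface deals only in HTableDescriptor. A small usage sketch, assuming the single-argument FSTableDescriptors(Configuration) constructor that the class provides outside this diff:

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

class ListDescriptors {
  static void dump(Configuration conf) throws IOException {
    TableDescriptors tds = new FSTableDescriptors(conf);            // assumed constructor
    HTableDescriptor meta = tds.get(TableName.META_TABLE_NAME);     // was getDescriptor(...)
    Map<String, HTableDescriptor> all = tds.getAllDescriptors();    // was Map<String, TableDescriptor>
    System.out.println(meta.getTableName() + ", tables on disk: " + all.size());
  }
}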
View File

@@ -77,7 +77,6 @@ import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
@@ -1353,6 +1352,7 @@ public class HMaster extends HRegionServer implements MasterServices {
return true;
}
@Override
@VisibleForTesting
public RegionNormalizer getRegionNormalizer() {
return this.normalizer;
@@ -1384,10 +1384,10 @@ public class HMaster extends HRegionServer implements MasterServices {
Collections.shuffle(allEnabledTables);
for (TableName table : allEnabledTables) {
TableDescriptor tblDesc = getTableDescriptors().getDescriptor(table);
HTableDescriptor tblDesc = getTableDescriptors().get(table);
if (table.isSystemTable() || (tblDesc != null &&
tblDesc.getHTableDescriptor() != null &&
!tblDesc.getHTableDescriptor().isNormalizationEnabled())) {
tblDesc != null &&
!tblDesc.isNormalizationEnabled())) {
LOG.debug("Skipping normalization for table: " + table + ", as it's either system"
+ " table or doesn't have auto normalization turned on");
continue;
@@ -2346,6 +2346,7 @@ public class HMaster extends HRegionServer implements MasterServices {
*
* @return true if active master, false if not.
*/
@Override
public boolean isActiveMaster() {
return isActiveMaster;
}
@@ -2490,6 +2491,7 @@ public class HMaster extends HRegionServer implements MasterServices {
/**
* @return the underlying snapshot manager
*/
@Override
public SnapshotManager getSnapshotManager() {
return this.snapshotManager;
}
@@ -2497,6 +2499,7 @@ public class HMaster extends HRegionServer implements MasterServices {
/**
* @return the underlying MasterProcedureManagerHost
*/
@Override
public MasterProcedureManagerHost getMasterProcedureManagerHost() {
return mpmHost;
}

View File

@@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -214,8 +213,7 @@ public class MasterFileSystem {
// meta table is a system table, so descriptors are predefined,
// we should get them from registry.
FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd);
fsd.createTableDescriptor(
new TableDescriptor(fsd.get(TableName.META_TABLE_NAME)));
fsd.createTableDescriptor(fsd.get(TableName.META_TABLE_NAME));
return rd;
}

View File

@@ -29,8 +29,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
import com.google.common.collect.Sets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
@@ -197,7 +197,7 @@ public class TableStateManager {
public static void fixTableStates(TableDescriptors tableDescriptors, Connection connection)
throws IOException {
final Map<String, TableDescriptor> allDescriptors =
final Map<String, HTableDescriptor> allDescriptors =
tableDescriptors.getAllDescriptors();
final Map<String, TableState> states = new HashMap<>();
MetaTableAccessor.fullScanTables(connection, new MetaTableAccessor.Visitor() {
@@ -209,7 +209,7 @@ public class TableStateManager {
return true;
}
});
for (Map.Entry<String, TableDescriptor> entry : allDescriptors.entrySet()) {
for (Map.Entry<String, HTableDescriptor> entry : allDescriptors.entrySet()) {
String table = entry.getKey();
if (table.equals(TableName.META_TABLE_NAME.getNameAsString()))
continue;

View File

@@ -38,7 +38,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
@@ -481,7 +480,7 @@ public class CloneSnapshotProcedure
// 1. Create Table Descriptor
// using a copy of descriptor, table will be created enabling first
TableDescriptor underConstruction = new TableDescriptor(hTableDescriptor);
HTableDescriptor underConstruction = new HTableDescriptor(hTableDescriptor);
final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
.createTableDescriptorForTableDirectory(tempTableDir, underConstruction, false);

View File

@@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
@@ -364,11 +363,10 @@ public class CreateTableProcedure
// 1. Create Table Descriptor
// using a copy of descriptor, table will be created enabling first
TableDescriptor underConstruction = new TableDescriptor(hTableDescriptor);
final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
.createTableDescriptorForTableDirectory(
tempTableDir, underConstruction, false);
tempTableDir, hTableDescriptor, false);
// 2. Create Regions
newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir,

View File

@@ -96,7 +96,7 @@ public class TruncateTableProcedure
break;
case TRUNCATE_TABLE_REMOVE_FROM_META:
hTableDescriptor = env.getMasterServices().getTableDescriptors()
.getDescriptor(tableName).getHTableDescriptor();
.get(tableName);
DeleteTableProcedure.deleteFromMeta(env, getTableName(), regions);
DeleteTableProcedure.deleteAssignmentState(env, getTableName());
setNextState(TruncateTableState.TRUNCATE_TABLE_CLEAR_FS_LAYOUT);

View File

@@ -34,7 +34,6 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
@@ -112,14 +111,14 @@ public class CompactionTool extends Configured implements Tool {
if (isFamilyDir(fs, path)) {
Path regionDir = path.getParent();
Path tableDir = regionDir.getParent();
TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
compactStoreFiles(tableDir, htd.getHTableDescriptor(), hri,
compactStoreFiles(tableDir, htd, hri,
path.getName(), compactOnce, major);
} else if (isRegionDir(fs, path)) {
Path tableDir = path.getParent();
TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
compactRegion(tableDir, htd.getHTableDescriptor(), path, compactOnce, major);
HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
compactRegion(tableDir, htd, path, compactOnce, major);
} else if (isTableDir(fs, path)) {
compactTable(path, compactOnce, major);
} else {
@@ -130,9 +129,9 @@ public class CompactionTool extends Configured implements Tool {
private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major)
throws IOException {
TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) {
compactRegion(tableDir, htd.getHTableDescriptor(), regionDir, compactOnce, major);
compactRegion(tableDir, htd, regionDir, compactOnce, major);
}
}
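Same pattern here: FSTableDescriptors.getTableDescriptorFromFs now returns the HTableDescriptor directly, so the getHTableDescriptor() hop is gone. A minimal sketch of reading a descriptor straight from a table directory, with fs and tableDir standing in for the values used in the hunks above:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

class ReadDescriptorFromFs {
  static HTableDescriptor read(FileSystem fs, Path tableDir) throws IOException {
    // Returns the descriptor itself; previously this required .getHTableDescriptor().
    return FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
  }
}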

View File

@@ -42,7 +42,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
import org.apache.hadoop.hbase.mob.MobUtils;
@@ -348,8 +347,7 @@ public final class SnapshotManifest {
private void load() throws IOException {
switch (getSnapshotFormat(desc)) {
case SnapshotManifestV1.DESCRIPTOR_VERSION: {
this.htd = FSTableDescriptors.getTableDescriptorFromFs(fs, workingDir)
.getHTableDescriptor();
this.htd = FSTableDescriptors.getTableDescriptorFromFs(fs, workingDir);
ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
try {
this.regionManifests =
@@ -447,8 +445,7 @@ public final class SnapshotManifest {
LOG.info("Using old Snapshot Format");
// write a copy of descriptor to the snapshot directory
new FSTableDescriptors(conf, fs, rootDir)
.createTableDescriptorForTableDirectory(workingDir, new TableDescriptor(
htd), false);
.createTableDescriptorForTableDirectory(workingDir, htd, false);
} else {
LOG.debug("Convert to Single Snapshot Manifest");
convertToV2SingleManifest();

View File

@@ -40,15 +40,16 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableInfoMissingException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.regionserver.BloomType;
/**
* Implementation of {@link TableDescriptors} that reads descriptors from the
@@ -88,13 +89,13 @@ public class FSTableDescriptors implements TableDescriptors {
// This cache does not age out the old stuff. Thinking is that the amount
// of data we keep up in here is so small, no need to do occasional purge.
// TODO.
private final Map<TableName, TableDescriptor> cache =
new ConcurrentHashMap<TableName, TableDescriptor>();
private final Map<TableName, HTableDescriptor> cache =
new ConcurrentHashMap<TableName, HTableDescriptor>();
/**
* Table descriptor for <code>hbase:meta</code> catalog table
*/
private final HTableDescriptor metaTableDescritor;
private final HTableDescriptor metaTableDescriptor;
/**
* Construct a FSTableDescriptors instance using the hbase root dir of the given
@@ -122,7 +123,44 @@ public class FSTableDescriptors implements TableDescriptors {
this.fsreadonly = fsreadonly;
this.usecache = usecache;
this.metaTableDescritor = TableDescriptor.metaTableDescriptor(conf);
this.metaTableDescriptor = createMetaTableDescriptor(conf);
}
@VisibleForTesting
public static HTableDescriptor createMetaTableDescriptor(final Configuration conf)
throws IOException {
HTableDescriptor metaDescriptor = new HTableDescriptor(
TableName.META_TABLE_NAME,
new HColumnDescriptor[] {
new HColumnDescriptor(HConstants.CATALOG_FAMILY)
.setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
HConstants.DEFAULT_HBASE_META_VERSIONS))
.setInMemory(true)
.setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
.setBloomFilterType(BloomType.NONE)
// Enable cache of data blocks in L1 if more than one caching tier deployed:
// e.g. if using CombinedBlockCache (BucketCache).
.setCacheDataInL1(true),
new HColumnDescriptor(HConstants.TABLE_FAMILY)
// Ten is arbitrary number. Keep versions to help debugging.
.setMaxVersions(10)
.setInMemory(true)
.setBlocksize(8 * 1024)
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
.setBloomFilterType(BloomType.NONE)
// Enable cache of data blocks in L1 if more than one caching tier deployed:
// e.g. if using CombinedBlockCache (BucketCache).
.setCacheDataInL1(true)
}) {
};
metaDescriptor.addCoprocessor(
"org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
null, Coprocessor.PRIORITY_SYSTEM, null);
return metaDescriptor;
}
@Override
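
This factory replaces the removed HTableDescriptor.META_TABLEDESC constant, so the hbase:meta descriptor is now built from a Configuration on demand. A hedged usage sketch; the overridden version count is purely illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class MetaDescriptorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The catalog family's max versions now comes from configuration (illustrative value).
    conf.setInt(HConstants.HBASE_META_VERSIONS, 10);
    HTableDescriptor meta = FSTableDescriptors.createMetaTableDescriptor(conf);
    System.out.println(meta.getNameAsString());
  }
}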
@ -150,12 +188,12 @@ public class FSTableDescriptors implements TableDescriptors {
*/
@Override
@Nullable
public TableDescriptor getDescriptor(final TableName tablename)
public HTableDescriptor get(final TableName tablename)
throws IOException {
invocations++;
if (TableName.META_TABLE_NAME.equals(tablename)) {
cachehits++;
return new TableDescriptor(metaTableDescritor);
return metaTableDescriptor;
}
// hbase:meta is already handled. If someone tries to get the descriptor for
// .logs, .oldlogs or .corrupt throw an exception.
@ -165,15 +203,15 @@ public class FSTableDescriptors implements TableDescriptors {
if (usecache) {
// Look in cache of descriptors.
TableDescriptor cachedtdm = this.cache.get(tablename);
HTableDescriptor cachedtdm = this.cache.get(tablename);
if (cachedtdm != null) {
cachehits++;
return cachedtdm;
}
}
TableDescriptor tdmt = null;
HTableDescriptor tdmt = null;
try {
tdmt = getTableDescriptorFromFs(fs, rootdir, tablename, !fsreadonly);
tdmt = getTableDescriptorFromFs(fs, rootdir, tablename);
} catch (NullPointerException e) {
LOG.debug("Exception during readTableDecriptor. Current table name = "
+ tablename, e);
@ -191,44 +229,27 @@ public class FSTableDescriptors implements TableDescriptors {
return tdmt;
}
/**
* Get the current table descriptor for the given table, or null if none exists.
*
* Uses a local cache of the descriptor but still checks the filesystem on each call
* to see if a newer file has been created since the cached one was read.
*/
@Override
public HTableDescriptor get(TableName tableName) throws IOException {
if (TableName.META_TABLE_NAME.equals(tableName)) {
cachehits++;
return metaTableDescritor;
}
TableDescriptor descriptor = getDescriptor(tableName);
return descriptor == null ? null : descriptor.getHTableDescriptor();
}
/**
* Returns a map from table name to table descriptor for all tables.
*/
@Override
public Map<String, TableDescriptor> getAllDescriptors()
public Map<String, HTableDescriptor> getAllDescriptors()
throws IOException {
Map<String, TableDescriptor> tds = new TreeMap<String, TableDescriptor>();
Map<String, HTableDescriptor> tds = new TreeMap<String, HTableDescriptor>();
if (fsvisited && usecache) {
for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) {
for (Map.Entry<TableName, HTableDescriptor> entry: this.cache.entrySet()) {
tds.put(entry.getKey().toString(), entry.getValue());
}
// add hbase:meta to the response
tds.put(this.metaTableDescritor.getNameAsString(),
new TableDescriptor(metaTableDescritor));
tds.put(this.metaTableDescriptor.getNameAsString(), metaTableDescriptor);
} else {
LOG.debug("Fetching table descriptors from the filesystem.");
boolean allvisited = true;
for (Path d : FSUtils.getTableDirs(fs, rootdir)) {
TableDescriptor htd = null;
HTableDescriptor htd = null;
try {
htd = getDescriptor(FSUtils.getTableName(d));
htd = get(FSUtils.getTableName(d));
} catch (FileNotFoundException fnfe) {
// inability of retrieving one HTD shouldn't stop getting the remaining
LOG.warn("Trouble retrieving htd", fnfe);
@ -237,7 +258,7 @@ public class FSTableDescriptors implements TableDescriptors {
allvisited = false;
continue;
} else {
tds.put(htd.getHTableDescriptor().getTableName().getNameAsString(), htd);
tds.put(htd.getTableName().getNameAsString(), htd);
}
fsvisited = allvisited;
}
@ -251,10 +272,10 @@ public class FSTableDescriptors implements TableDescriptors {
@Override
public Map<String, HTableDescriptor> getAll() throws IOException {
Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
Map<String, TableDescriptor> allDescriptors = getAllDescriptors();
for (Map.Entry<String, TableDescriptor> entry : allDescriptors
Map<String, HTableDescriptor> allDescriptors = getAllDescriptors();
for (Map.Entry<String, HTableDescriptor> entry : allDescriptors
.entrySet()) {
htds.put(entry.getKey(), entry.getValue().getHTableDescriptor());
htds.put(entry.getKey(), entry.getValue());
}
return htds;
}
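
getAll() is now a straight copy of getAllDescriptors(), since both maps carry HTableDescriptor values. A sketch that lists every table known on disk, assuming an FSTableDescriptors constructed from the cluster configuration:

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class ListTablesSketch {
  static void listTables(Configuration conf) throws IOException {
    FSTableDescriptors fstd = new FSTableDescriptors(conf);
    // Includes hbase:meta plus every descriptor found (or cached) under the root dir.
    Map<String, HTableDescriptor> all = fstd.getAll();
    for (Map.Entry<String, HTableDescriptor> e : all.entrySet()) {
      System.out.println(e.getKey() + " -> "
          + e.getValue().getColumnFamilies().length + " families");
    }
  }
}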
@ -283,27 +304,6 @@ public class FSTableDescriptors implements TableDescriptors {
return htds;
}
/**
* Adds (or updates) the table descriptor to the FileSystem
* and updates the local cache with it.
*/
@Override
public void add(TableDescriptor htd) throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
}
TableName tableName = htd.getHTableDescriptor().getTableName();
if (TableName.META_TABLE_NAME.equals(tableName)) {
throw new NotImplementedException();
}
if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
throw new NotImplementedException(
"Cannot add a table descriptor for a reserved subdirectory name: "
+ htd.getHTableDescriptor().getNameAsString());
}
updateTableDescriptor(htd);
}
/**
* Adds (or updates) the table descriptor to the FileSystem
* and updates the local cache with it.
@ -322,12 +322,7 @@ public class FSTableDescriptors implements TableDescriptors {
"Cannot add a table descriptor for a reserved subdirectory name: "
+ htd.getNameAsString());
}
TableDescriptor descriptor = getDescriptor(htd.getTableName());
if (descriptor == null)
descriptor = new TableDescriptor(htd);
else
descriptor.setHTableDescriptor(htd);
updateTableDescriptor(descriptor);
updateTableDescriptor(htd);
}
/**
@ -347,12 +342,8 @@ public class FSTableDescriptors implements TableDescriptors {
throw new IOException("Failed delete of " + tabledir.toString());
}
}
TableDescriptor descriptor = this.cache.remove(tablename);
if (descriptor == null) {
return null;
} else {
return descriptor.getHTableDescriptor();
}
HTableDescriptor descriptor = this.cache.remove(tablename);
return descriptor;
}
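
With getDescriptor() folded into get(), a single lookup covers the hbase:meta shortcut, the cache, and the filesystem read. A usage sketch with an assumed table name:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class GetDescriptorSketch {
  static HTableDescriptor lookup(Configuration conf, String table) throws IOException {
    FSTableDescriptors fstd = new FSTableDescriptors(conf);
    // hbase:meta is answered from the built-in descriptor; other names hit the cache,
    // then fall back to reading the newest .tableinfo file from the filesystem.
    HTableDescriptor htd = fstd.get(TableName.valueOf(table));
    if (htd == null) {
      System.out.println("No descriptor on disk for " + table);
    }
    return htd;
  }
}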
/**
@ -535,49 +526,28 @@ public class FSTableDescriptors implements TableDescriptors {
* if it exists, bypassing the local cache.
* Returns null if it's not found.
*/
public static TableDescriptor getTableDescriptorFromFs(FileSystem fs,
public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
Path hbaseRootDir, TableName tableName) throws IOException {
Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
return getTableDescriptorFromFs(fs, tableDir);
}
/**
* Returns the latest table descriptor for the given table directly from the file system
* if it exists, bypassing the local cache.
* Returns null if it's not found.
*/
public static TableDescriptor getTableDescriptorFromFs(FileSystem fs,
Path hbaseRootDir, TableName tableName, boolean rewritePb) throws IOException {
Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
return getTableDescriptorFromFs(fs, tableDir, rewritePb);
}
/**
* Returns the latest table descriptor for the table located at the given directory
* directly from the file system if it exists.
* @throws TableInfoMissingException if there is no descriptor
*/
public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
throws IOException {
return getTableDescriptorFromFs(fs, tableDir, false);
}
/**
* Returns the latest table descriptor for the table located at the given directory
* directly from the file system if it exists.
* @throws TableInfoMissingException if there is no descriptor
*/
public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir,
boolean rewritePb)
public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
throws IOException {
FileStatus status = getTableInfoPath(fs, tableDir, false);
if (status == null) {
throw new TableInfoMissingException("No table descriptor file under " + tableDir);
}
return readTableDescriptor(fs, status, rewritePb);
return readTableDescriptor(fs, status);
}
private static TableDescriptor readTableDescriptor(FileSystem fs, FileStatus status,
boolean rewritePb) throws IOException {
private static HTableDescriptor readTableDescriptor(FileSystem fs, FileStatus status)
throws IOException {
int len = Ints.checkedCast(status.getLen());
byte [] content = new byte[len];
FSDataInputStream fsDataInputStream = fs.open(status.getPath());
@ -586,34 +556,13 @@ public class FSTableDescriptors implements TableDescriptors {
} finally {
fsDataInputStream.close();
}
TableDescriptor td = null;
HTableDescriptor htd = null;
try {
td = TableDescriptor.parseFrom(content);
htd = HTableDescriptor.parseFrom(content);
} catch (DeserializationException e) {
// we have old HTableDescriptor here
try {
HTableDescriptor htd = HTableDescriptor.parseFrom(content);
LOG.warn("Found old table descriptor, converting to new format for table " +
htd.getTableName() + "; NOTE table will be in ENABLED state!");
td = new TableDescriptor(htd);
if (rewritePb) rewriteTableDescriptor(fs, status, td);
} catch (DeserializationException e1) {
throw new IOException("content=" + Bytes.toShort(content), e);
}
}
if (rewritePb && !ProtobufUtil.isPBMagicPrefix(content)) {
// Convert the file over to be pb before leaving here.
rewriteTableDescriptor(fs, status, td);
}
return td;
}
private static void rewriteTableDescriptor(final FileSystem fs, final FileStatus status,
final TableDescriptor td)
throws IOException {
Path tableInfoDir = status.getPath().getParent();
Path tableDir = tableInfoDir.getParent();
writeTableDescriptor(fs, td, tableDir, status);
return htd;
}
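
The old-format fallback and the rewritePb flag are gone, so a .tableinfo file is simply the protobuf serialization of an HTableDescriptor. A round-trip sketch using only the public parse/serialize calls (the table name is made up):

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class DescriptorBytesSketch {
  public static void main(String[] args) throws Exception {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example"));
    // toByteArray() writes the pb-magic-prefixed form that readTableDescriptor now expects.
    byte[] content = htd.toByteArray();
    HTableDescriptor parsed = HTableDescriptor.parseFrom(content);
    System.out.println(htd.equals(parsed)); // true
  }
}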
/**
@ -621,18 +570,18 @@ public class FSTableDescriptors implements TableDescriptors {
* @throws IOException Thrown if failed update.
* @throws NotImplementedException if in read only mode
*/
@VisibleForTesting Path updateTableDescriptor(TableDescriptor td)
@VisibleForTesting Path updateTableDescriptor(HTableDescriptor td)
throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot update a table descriptor - in read only mode");
}
TableName tableName = td.getHTableDescriptor().getTableName();
TableName tableName = td.getTableName();
Path tableDir = getTableDir(tableName);
Path p = writeTableDescriptor(fs, td, tableDir, getTableInfoPath(tableDir));
if (p == null) throw new IOException("Failed update");
LOG.info("Updated tableinfo=" + p);
if (usecache) {
this.cache.put(td.getHTableDescriptor().getTableName(), td);
this.cache.put(td.getTableName(), td);
}
return p;
}
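
updateTableDescriptor() now takes the HTableDescriptor directly, writes a new .tableinfo with the next sequence id, deletes the previous file, and refreshes the cache. It keeps default visibility and is @VisibleForTesting, so the sketch below assumes it is called from test code in the same package; the family name is illustrative:

package org.apache.hadoop.hbase.util; // updateTableDescriptor is package-private, so same package

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class UpdateDescriptorSketch {
  // Adds a family to an existing table's descriptor and persists it.
  static Path addFamily(FSTableDescriptors fstd, TableName table) throws IOException {
    HTableDescriptor htd = fstd.get(table);       // assumed non-null: the table already exists
    htd.addFamily(new HColumnDescriptor("cf2"));  // "cf2" is an illustrative family name
    // Writes a new .tableinfo with the next sequence id, removes the old file,
    // and refreshes the in-memory cache.
    return fstd.updateTableDescriptor(htd);
  }
}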
@ -683,7 +632,7 @@ public class FSTableDescriptors implements TableDescriptors {
* @return Descriptor file or null if we failed write.
*/
private static Path writeTableDescriptor(final FileSystem fs,
final TableDescriptor htd, final Path tableDir,
final HTableDescriptor htd, final Path tableDir,
final FileStatus currentDescriptorFile)
throws IOException {
// Get temporary dir into which we'll first write a file to avoid half-written file phenomenon.
@ -738,7 +687,7 @@ public class FSTableDescriptors implements TableDescriptors {
return tableInfoDirPath;
}
private static void writeTD(final FileSystem fs, final Path p, final TableDescriptor htd)
private static void writeTD(final FileSystem fs, final Path p, final HTableDescriptor htd)
throws IOException {
FSDataOutputStream out = fs.create(p, false);
try {
@ -750,22 +699,13 @@ public class FSTableDescriptors implements TableDescriptors {
}
}
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table.
* Used by tests.
* @return True if we successfully created file.
*/
public boolean createTableDescriptor(TableDescriptor htd) throws IOException {
return createTableDescriptor(htd, false);
}
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table.
* Used by tests.
* @return True if we successfully created file.
*/
public boolean createTableDescriptor(HTableDescriptor htd) throws IOException {
return createTableDescriptor(new TableDescriptor(htd), false);
return createTableDescriptor(htd, false);
}
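
The TableDescriptor overloads of createTableDescriptor() are dropped; only the HTableDescriptor variants remain. A create-then-read round-trip sketch in the spirit of TestFSTableDescriptors; the root directory and table name are assumptions:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class CreateDescriptorSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path rootdir = new Path("/tmp/fstd-sketch");            // illustrative root dir
    FSTableDescriptors fstd = new FSTableDescriptors(conf, fs, rootdir);
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("sketchTable"));
    System.out.println(fstd.createTableDescriptor(htd));    // true on first creation
    System.out.println(fstd.createTableDescriptor(htd));    // false: identical file already exists
    System.out.println(fstd.get(htd.getTableName()).equals(htd)); // true
  }
}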
/**
@ -775,19 +715,10 @@ public class FSTableDescriptors implements TableDescriptors {
*
* @return True if we successfully created file.
*/
public boolean createTableDescriptor(TableDescriptor htd, boolean forceCreation)
throws IOException {
Path tableDir = getTableDir(htd.getHTableDescriptor().getTableName());
return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation);
}
/**
* Create tables descriptor for given HTableDescriptor. Default TableDescriptor state
* will be used (typically ENABLED).
*/
public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation)
throws IOException {
return createTableDescriptor(new TableDescriptor(htd), forceCreation);
Path tableDir = getTableDir(htd.getTableName());
return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation);
}
/**
@ -802,7 +733,7 @@ public class FSTableDescriptors implements TableDescriptors {
* @throws IOException if a filesystem error occurs
*/
public boolean createTableDescriptorForTableDirectory(Path tableDir,
TableDescriptor htd, boolean forceCreation) throws IOException {
HTableDescriptor htd, boolean forceCreation) throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot create a table descriptor - in read only mode");
}
@ -811,7 +742,7 @@ public class FSTableDescriptors implements TableDescriptors {
LOG.debug("Current tableInfoPath = " + status.getPath());
if (!forceCreation) {
if (fs.exists(status.getPath()) && status.getLen() > 0) {
if (readTableDescriptor(fs, status, false).equals(htd)) {
if (readTableDescriptor(fs, status).equals(htd)) {
LOG.debug("TableInfo already exists.. Skipping creation");
return false;
}
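
createTableDescriptorForTableDirectory() likewise accepts the HTableDescriptor directly, which is what the snapshot code earlier in this patch relies on when copying a descriptor into a working directory. A hedged sketch; the working-directory path is supplied by the caller:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class SnapshotDescriptorSketch {
  // Writes htd's .tableinfo into an arbitrary directory (e.g. a snapshot working dir).
  static boolean writeCopy(Configuration conf, Path workingDir, HTableDescriptor htd)
      throws IOException {
    // forceCreation=false: returns false if an identical descriptor file is already there.
    return new FSTableDescriptors(conf)
        .createTableDescriptorForTableDirectory(workingDir, htd, false);
  }
}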


@ -94,7 +94,6 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
@ -1201,9 +1200,9 @@ public class HBaseFsck extends Configured implements Closeable {
modTInfo = new TableInfo(tableName);
tablesInfo.put(tableName, modTInfo);
try {
TableDescriptor htd =
HTableDescriptor htd =
FSTableDescriptors.getTableDescriptorFromFs(fs, hbaseRoot, tableName);
modTInfo.htds.add(htd.getHTableDescriptor());
modTInfo.htds.add(htd);
} catch (IOException ioe) {
if (!orphanTableDirs.containsKey(tableName)) {
LOG.warn("Unable to read .tableinfo from " + hbaseRoot, ioe);
@ -1258,7 +1257,7 @@ public class HBaseFsck extends Configured implements Closeable {
for (String columnFamily : columns) {
htd.addFamily(new HColumnDescriptor(columnFamily));
}
fstd.createTableDescriptor(new TableDescriptor(htd), true);
fstd.createTableDescriptor(htd, true);
return true;
}
@ -1306,7 +1305,7 @@ public class HBaseFsck extends Configured implements Closeable {
if (tableName.equals(htds[j].getTableName())) {
HTableDescriptor htd = htds[j];
LOG.info("fixing orphan table: " + tableName + " from cache");
fstd.createTableDescriptor(new TableDescriptor(htd), true);
fstd.createTableDescriptor(htd, true);
j++;
iter.remove();
}


@ -153,8 +153,7 @@ class HMerge {
this.rootDir = FSUtils.getRootDir(conf);
Path tabledir = FSUtils.getTableDir(this.rootDir, tableName);
this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir)
.getHTableDescriptor();
this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir);
String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME;
final Configuration walConf = new Configuration(conf);


@ -29,7 +29,6 @@ import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
@ -139,9 +138,9 @@ public class Merge extends Configured implements Tool {
if (info2 == null) {
throw new NullPointerException("info2 is null using key " + meta);
}
TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
this.rootdir, this.tableName);
HRegion merged = merge(htd.getHTableDescriptor(), meta, info1, info2);
HRegion merged = merge(htd, meta, info1, info2);
LOG.info("Adding " + merged.getRegionInfo() + " to " +
meta.getRegionInfo());


@ -159,8 +159,8 @@ public class TestHColumnDescriptorDefaultVersions {
// Verify descriptor from HDFS
MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
TableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
hcds = td.getHTableDescriptor().getColumnFamilies();
HTableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
hcds = td.getColumnFamilies();
verifyHColumnDescriptor(expected, hcds, tableName, families);
}


@ -1,55 +0,0 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static org.junit.Assert.assertEquals;
/**
* Test setting values in the descriptor
*/
@Category(SmallTests.class)
public class TestTableDescriptor {
private static final Log LOG = LogFactory.getLog(TestTableDescriptor.class);
@Test
public void testPb() throws DeserializationException, IOException {
HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
final int v = 123;
htd.setMaxFileSize(v);
htd.setDurability(Durability.ASYNC_WAL);
htd.setReadOnly(true);
htd.setRegionReplication(2);
TableDescriptor td = new TableDescriptor(htd);
byte[] bytes = td.toByteArray();
TableDescriptor deserializedTd = TableDescriptor.parseFrom(bytes);
assertEquals(td, deserializedTd);
assertEquals(td.getHTableDescriptor(), deserializedTd.getHTableDescriptor());
}
}
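
TestTableDescriptor disappears along with the class it covered. If an equivalent serialization check is still wanted, it can target HTableDescriptor directly; a sketch of such a test, not part of this patch:

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.junit.Test;

public class HTableDescriptorPbRoundTripSketch {
  @Test
  public void testPb() throws Exception {
    HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
    htd.setMaxFileSize(123);
    htd.setDurability(Durability.ASYNC_WAL);
    htd.setReadOnly(true);
    htd.setRegionReplication(2);
    // Round-trip through the pb form that FSTableDescriptors now writes and reads.
    HTableDescriptor deserialized = HTableDescriptor.parseFrom(htd.toByteArray());
    assertEquals(htd, deserialized);
  }
}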


@ -26,7 +26,6 @@ import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
@ -47,10 +46,8 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaMockingUtil;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
import org.apache.hadoop.hbase.ProcedureInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
@ -59,12 +56,8 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination;
import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLogManagerDetails;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
@ -75,9 +68,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
@ -93,7 +84,6 @@ import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.google.protobuf.RpcController;
import com.google.protobuf.Service;
import com.google.protobuf.ServiceException;
@Category({MasterTests.class, SmallTests.class})
@ -264,7 +254,7 @@ public class TestCatalogJanitor {
return null;
}
@Override public Map<String, TableDescriptor> getAllDescriptors() throws IOException {
@Override public Map<String, HTableDescriptor> getAllDescriptors() throws IOException {
// noop
return null;
}
@ -275,12 +265,6 @@ public class TestCatalogJanitor {
return createHTableDescriptor();
}
@Override
public TableDescriptor getDescriptor(TableName tablename)
throws IOException {
return createTableDescriptor();
}
@Override
public Map<String, HTableDescriptor> getByNamespace(String name) throws IOException {
return null;
@ -291,11 +275,6 @@ public class TestCatalogJanitor {
// noop
}
@Override
public void add(TableDescriptor htd) throws IOException {
// noop
}
@Override
public void setCacheOn() throws IOException {
}
@ -867,10 +846,6 @@ public class TestCatalogJanitor {
return htd;
}
private TableDescriptor createTableDescriptor() {
return new TableDescriptor(createHTableDescriptor());
}
private MultiResponse buildMultiResponse(MultiRequest req) {
MultiResponse.Builder builder = MultiResponse.newBuilder();
RegionActionResult.Builder regionActionResultBuilder =


@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
@ -119,9 +118,7 @@ public class MasterProcedureTestingUtility {
assertEquals(regions.length, countMetaRegions(master, tableName));
// check htd
TableDescriptor tableDesc = master.getTableDescriptors().getDescriptor(tableName);
assertTrue("table descriptor not found", tableDesc != null);
HTableDescriptor htd = tableDesc.getHTableDescriptor();
HTableDescriptor htd = master.getTableDescriptors().get(tableName);
assertTrue("table descriptor not found", htd != null);
for (int i = 0; i < family.length; ++i) {
assertTrue("family not found " + family[i], htd.getFamily(Bytes.toBytes(family[i])) != null);
@ -142,7 +139,7 @@ public class MasterProcedureTestingUtility {
// check htd
assertTrue("found htd of deleted table",
master.getTableDescriptors().getDescriptor(tableName) == null);
master.getTableDescriptors().get(tableName) == null);
}
private static int countMetaRegions(final HMaster master, final TableName tableName)
@ -368,18 +365,18 @@ public class MasterProcedureTestingUtility {
public static void validateColumnFamilyAddition(final HMaster master, final TableName tableName,
final String family) throws IOException {
TableDescriptor htd = master.getTableDescriptors().getDescriptor(tableName);
HTableDescriptor htd = master.getTableDescriptors().get(tableName);
assertTrue(htd != null);
assertTrue(htd.getHTableDescriptor().hasFamily(family.getBytes()));
assertTrue(htd.hasFamily(family.getBytes()));
}
public static void validateColumnFamilyDeletion(final HMaster master, final TableName tableName,
final String family) throws IOException {
// verify htd
TableDescriptor htd = master.getTableDescriptors().getDescriptor(tableName);
HTableDescriptor htd = master.getTableDescriptors().get(tableName);
assertTrue(htd != null);
assertFalse(htd.getHTableDescriptor().hasFamily(family.getBytes()));
assertFalse(htd.hasFamily(family.getBytes()));
// verify fs
final FileSystem fs = master.getMasterFileSystem().getFileSystem();
@ -393,10 +390,10 @@ public class MasterProcedureTestingUtility {
public static void validateColumnFamilyModification(final HMaster master,
final TableName tableName, final String family, HColumnDescriptor columnDescriptor)
throws IOException {
TableDescriptor htd = master.getTableDescriptors().getDescriptor(tableName);
HTableDescriptor htd = master.getTableDescriptors().get(tableName);
assertTrue(htd != null);
HColumnDescriptor hcfd = htd.getHTableDescriptor().getFamily(family.getBytes());
HColumnDescriptor hcfd = htd.getFamily(family.getBytes());
assertTrue(hcfd.equals(columnDescriptor));
}
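
The validation helpers now read through master.getTableDescriptors().get(tableName) and inspect the HTableDescriptor directly. A condensed sketch of the same pattern, assuming a live HMaster reference as these utilities receive:

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.util.Bytes;

public class ValidateFamilySketch {
  static void assertHasFamily(HMaster master, TableName tableName, String family)
      throws IOException {
    HTableDescriptor htd = master.getTableDescriptors().get(tableName);
    assertNotNull("table descriptor not found", htd);
    assertTrue("family not found " + family, htd.hasFamily(Bytes.toBytes(family)));
  }
}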


@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.master.MasterFileSystem;
@ -275,9 +274,9 @@ public class TestTableDescriptorModificationFromClient {
// Verify descriptor from HDFS
MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
TableDescriptor td =
HTableDescriptor td =
FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
verifyTableDescriptor(td.getHTableDescriptor(), tableName, families);
verifyTableDescriptor(td, tableName, families);
}
private void verifyTableDescriptor(final HTableDescriptor htd,


@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.KeepDeletedCells;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueTestUtil;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.testclassification.MediumTests;
@ -47,6 +46,7 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdge;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.junit.Before;
import org.junit.Rule;
@ -431,6 +431,7 @@ public class TestDefaultMemStore {
this.startSeqNum = startSeqNum;
}
@Override
public void run() {
try {
internalRun();
@ -961,7 +962,7 @@ public class TestDefaultMemStore {
edge.setCurrentTimeMillis(1234);
WALFactory wFactory = new WALFactory(conf, null, "1234");
HRegion meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir,
conf, TableDescriptor.metaTableDescriptor(conf),
conf, FSTableDescriptors.createMetaTableDescriptor(conf),
wFactory.getMetaWAL(HRegionInfo.FIRST_META_REGIONINFO.
getEncodedNameAsBytes()));
HRegionInfo hri = new HRegionInfo(TableName.valueOf("testShouldFlushMeta"),


@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TestMobSnapshotCloneIndependence;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.HMaster;
@ -64,7 +63,6 @@ import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;


@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
@ -498,8 +497,7 @@ public final class SnapshotTestingUtils {
this.tableRegions = tableRegions;
this.snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
new FSTableDescriptors(conf)
.createTableDescriptorForTableDirectory(snapshotDir,
new TableDescriptor(htd), false);
.createTableDescriptorForTableDirectory(snapshotDir, htd, false);
}
public HTableDescriptor getTableDescriptor() {
@ -679,8 +677,7 @@ public final class SnapshotTestingUtils {
private RegionData[] createTable(final HTableDescriptor htd, final int nregions)
throws IOException {
Path tableDir = FSUtils.getTableDir(rootDir, htd.getTableName());
new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(tableDir,
new TableDescriptor(htd), false);
new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(tableDir, htd, false);
assertTrue(nregions % 2 == 0);
RegionData[] regions = new RegionData[nregions];


@ -38,7 +38,6 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
@ -75,15 +74,14 @@ public class TestFSTableDescriptors {
public void testCreateAndUpdate() throws IOException {
Path testdir = UTIL.getDataTestDir("testCreateAndUpdate");
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testCreate"));
TableDescriptor td = new TableDescriptor(htd);
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
assertTrue(fstd.createTableDescriptor(td));
assertFalse(fstd.createTableDescriptor(td));
assertTrue(fstd.createTableDescriptor(htd));
assertFalse(fstd.createTableDescriptor(htd));
FileStatus [] statuses = fs.listStatus(testdir);
assertTrue("statuses.length="+statuses.length, statuses.length == 1);
for (int i = 0; i < 10; i++) {
fstd.updateTableDescriptor(td);
fstd.updateTableDescriptor(htd);
}
statuses = fs.listStatus(testdir);
assertTrue(statuses.length == 1);
@ -97,29 +95,27 @@ public class TestFSTableDescriptors {
Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo");
HTableDescriptor htd = new HTableDescriptor(
TableName.valueOf("testSequenceidAdvancesOnTableInfo"));
TableDescriptor td = new TableDescriptor(htd);
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
Path p0 = fstd.updateTableDescriptor(td);
Path p0 = fstd.updateTableDescriptor(htd);
int i0 = FSTableDescriptors.getTableInfoSequenceId(p0);
Path p1 = fstd.updateTableDescriptor(td);
Path p1 = fstd.updateTableDescriptor(htd);
// Assert we cleaned up the old file.
assertTrue(!fs.exists(p0));
int i1 = FSTableDescriptors.getTableInfoSequenceId(p1);
assertTrue(i1 == i0 + 1);
Path p2 = fstd.updateTableDescriptor(td);
Path p2 = fstd.updateTableDescriptor(htd);
// Assert we cleaned up the old file.
assertTrue(!fs.exists(p1));
int i2 = FSTableDescriptors.getTableInfoSequenceId(p2);
assertTrue(i2 == i1 + 1);
td = new TableDescriptor(htd);
Path p3 = fstd.updateTableDescriptor(td);
Path p3 = fstd.updateTableDescriptor(htd);
// Assert we cleaned up the old file.
assertTrue(!fs.exists(p2));
int i3 = FSTableDescriptors.getTableInfoSequenceId(p3);
assertTrue(i3 == i2 + 1);
TableDescriptor descriptor = fstd.getDescriptor(htd.getTableName());
assertEquals(descriptor, td);
HTableDescriptor descriptor = fstd.get(htd.getTableName());
assertEquals(descriptor, htd);
}
@Test
@ -171,13 +167,12 @@ public class TestFSTableDescriptors {
final String name = "testReadingHTDFromFS";
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
TableDescriptor td = new TableDescriptor(htd);
Path rootdir = UTIL.getDataTestDir(name);
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
fstd.createTableDescriptor(td);
TableDescriptor td2 =
fstd.createTableDescriptor(htd);
HTableDescriptor td2 =
FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, htd.getTableName());
assertTrue(td.equals(td2));
assertTrue(htd.equals(td2));
}
@Test public void testReadingOldHTDFromFS() throws IOException, DeserializationException {
@ -186,22 +181,21 @@ public class TestFSTableDescriptors {
Path rootdir = UTIL.getDataTestDir(name);
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
TableDescriptor td = new TableDescriptor(htd);
Path descriptorFile = fstd.updateTableDescriptor(td);
Path descriptorFile = fstd.updateTableDescriptor(htd);
try (FSDataOutputStream out = fs.create(descriptorFile, true)) {
out.write(htd.toByteArray());
}
FSTableDescriptors fstd2 = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
TableDescriptor td2 = fstd2.getDescriptor(htd.getTableName());
assertEquals(td, td2);
HTableDescriptor td2 = fstd2.get(htd.getTableName());
assertEquals(htd, td2);
FileStatus descriptorFile2 =
FSTableDescriptors.getTableInfoPath(fs, fstd2.getTableDir(htd.getTableName()));
byte[] buffer = td.toByteArray();
byte[] buffer = htd.toByteArray();
try (FSDataInputStream in = fs.open(descriptorFile2.getPath())) {
in.readFully(buffer);
}
TableDescriptor td3 = TableDescriptor.parseFrom(buffer);
assertEquals(td, td3);
HTableDescriptor td3 = HTableDescriptor.parseFrom(buffer);
assertEquals(htd, td3);
}
@Test public void testHTableDescriptors()
@ -221,7 +215,7 @@ public class TestFSTableDescriptors {
final int count = 10;
// Write out table infos.
for (int i = 0; i < count; i++) {
TableDescriptor htd = new TableDescriptor(
HTableDescriptor htd = new HTableDescriptor(
new HTableDescriptor(TableName.valueOf(name + i)));
htds.createTableDescriptor(htd);
}
@ -236,7 +230,7 @@ public class TestFSTableDescriptors {
for (int i = 0; i < count; i++) {
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
htd.addFamily(new HColumnDescriptor("" + i));
htds.updateTableDescriptor(new TableDescriptor(htd));
htds.updateTableDescriptor(htd);
}
// Wait a while so mod time we write is for sure different.
Thread.sleep(100);
@ -274,7 +268,7 @@ public class TestFSTableDescriptors {
for (int i = 0; i < count; i++) {
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
htd.addFamily(new HColumnDescriptor("" + i));
htds.updateTableDescriptor(new TableDescriptor(htd));
htds.updateTableDescriptor(htd);
}
for (int i = 0; i < count; i++) {
assertNotNull("Expected HTD, got null instead", htds.get(TableName.valueOf(name + i)));
@ -300,7 +294,7 @@ public class TestFSTableDescriptors {
htds.createTableDescriptor(htd);
}
// add hbase:meta
HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName());
HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
htds.createTableDescriptor(htd);
assertEquals("getAll() didn't return all TableDescriptors, expected: " +
@ -335,7 +329,7 @@ public class TestFSTableDescriptors {
assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
// add a new entry for hbase:meta
HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName());
HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
nonchtds.createTableDescriptor(htd);
// hbase:meta will only increase the cachehit by 1
@ -419,19 +413,18 @@ public class TestFSTableDescriptors {
Path testdir = UTIL.getDataTestDir("testCreateTableDescriptorUpdatesIfThereExistsAlready");
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(
"testCreateTableDescriptorUpdatesIfThereExistsAlready"));
TableDescriptor td = new TableDescriptor(htd);
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
assertTrue(fstd.createTableDescriptor(td));
assertFalse(fstd.createTableDescriptor(td));
assertTrue(fstd.createTableDescriptor(htd));
assertFalse(fstd.createTableDescriptor(htd));
htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
assertTrue(fstd.createTableDescriptor(td)); //this will re-create
assertTrue(fstd.createTableDescriptor(htd)); //this will re-create
Path tableDir = fstd.getTableDir(htd.getTableName());
Path tmpTableDir = new Path(tableDir, FSTableDescriptors.TMP_DIR);
FileStatus[] statuses = fs.listStatus(tmpTableDir);
assertTrue(statuses.length == 0);
assertEquals(td, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
assertEquals(htd, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
}
private static class FSTableDescriptorsTest extends FSTableDescriptors {


@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@ -67,6 +66,7 @@ public class TestMergeTool extends HBaseTestCase {
private MiniDFSCluster dfsCluster = null;
private WALFactory wals;
@Override
@Before
public void setUp() throws Exception {
// Set the timeout down else this test will take a while to complete.
@ -149,8 +149,7 @@ public class TestMergeTool extends HBaseTestCase {
try {
// Create meta region
createMetaRegion();
new FSTableDescriptors(this.conf, this.fs, testDir).createTableDescriptor(
new TableDescriptor(this.desc));
new FSTableDescriptors(this.conf, this.fs, testDir).createTableDescriptor(this.desc);
/*
* Create the regions we will merge
*/
@ -178,6 +177,7 @@ public class TestMergeTool extends HBaseTestCase {
}
}
@Override
@After
public void tearDown() throws Exception {
super.tearDown();