HBASE-6765 'Take a snapshot' interface (Jesse Yates)

Add interfaces for taking a snapshot. This is in hopes of cutting down on the overhead involved in reviewing snapshots.
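For orientation, a minimal sketch of how the client-side interface added below might be used. The snapshot and table names are hypothetical, and note that in this patch the master still stubs the snapshot RPCs out, so the calls only illustrate the intended API.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class SnapshotExample {
  public static void main(String[] args) throws Exception {
    // "mySnapshot" and "myTable" are hypothetical names.
    HBaseAdmin admin = new HBaseAdmin(HBaseConfiguration.create());
    try {
      // Take a timestamp-consistent snapshot and block until it completes.
      admin.snapshot("mySnapshot", "myTable");
      // Later: enumerate and clean up snapshots.
      System.out.println(admin.listSnapshots());
      admin.deleteSnapshot("mySnapshot");
    } finally {
      admin.close();
    }
  }
}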



git-svn-id: https://svn.apache.org/repos/asf/hbase/branches/hbase-7290@1445769 13f79535-47bb-0310-9956-ffa450edef68
Jonathan Hsieh 2013-02-13 17:53:26 +00:00
parent 99097d3d88
commit b251b57390
12 changed files with 5445 additions and 28 deletions

View File

@@ -11303,6 +11303,735 @@ public final class HBaseProtos {
// @@protoc_insertion_point(class_scope:NameInt64Pair)
}
public interface SnapshotDescriptionOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string name = 1;
boolean hasName();
String getName();
// optional string table = 2;
boolean hasTable();
String getTable();
// optional int64 creationTime = 3 [default = 0];
boolean hasCreationTime();
long getCreationTime();
// optional .SnapshotDescription.Type type = 4 [default = TIMESTAMP];
boolean hasType();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type getType();
}
public static final class SnapshotDescription extends
com.google.protobuf.GeneratedMessage
implements SnapshotDescriptionOrBuilder {
// Use SnapshotDescription.newBuilder() to construct.
private SnapshotDescription(Builder builder) {
super(builder);
}
private SnapshotDescription(boolean noInit) {}
private static final SnapshotDescription defaultInstance;
public static SnapshotDescription getDefaultInstance() {
return defaultInstance;
}
public SnapshotDescription getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_SnapshotDescription_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_SnapshotDescription_fieldAccessorTable;
}
public enum Type
implements com.google.protobuf.ProtocolMessageEnum {
TIMESTAMP(0, 0),
GLOBAL(1, 1),
;
public static final int TIMESTAMP_VALUE = 0;
public static final int GLOBAL_VALUE = 1;
public final int getNumber() { return value; }
public static Type valueOf(int value) {
switch (value) {
case 0: return TIMESTAMP;
case 1: return GLOBAL;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<Type>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<Type>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<Type>() {
public Type findValueByNumber(int number) {
return Type.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDescriptor().getEnumTypes().get(0);
}
private static final Type[] VALUES = {
TIMESTAMP, GLOBAL,
};
public static Type valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private Type(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:SnapshotDescription.Type)
}
private int bitField0_;
// required string name = 1;
public static final int NAME_FIELD_NUMBER = 1;
private java.lang.Object name_;
public boolean hasName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public String getName() {
java.lang.Object ref = name_;
if (ref instanceof String) {
return (String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
String s = bs.toStringUtf8();
if (com.google.protobuf.Internal.isValidUtf8(bs)) {
name_ = s;
}
return s;
}
}
private com.google.protobuf.ByteString getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((String) ref);
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional string table = 2;
public static final int TABLE_FIELD_NUMBER = 2;
private java.lang.Object table_;
public boolean hasTable() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public String getTable() {
java.lang.Object ref = table_;
if (ref instanceof String) {
return (String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
String s = bs.toStringUtf8();
if (com.google.protobuf.Internal.isValidUtf8(bs)) {
table_ = s;
}
return s;
}
}
private com.google.protobuf.ByteString getTableBytes() {
java.lang.Object ref = table_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((String) ref);
table_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional int64 creationTime = 3 [default = 0];
public static final int CREATIONTIME_FIELD_NUMBER = 3;
private long creationTime_;
public boolean hasCreationTime() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public long getCreationTime() {
return creationTime_;
}
// optional .SnapshotDescription.Type type = 4 [default = TIMESTAMP];
public static final int TYPE_FIELD_NUMBER = 4;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type type_;
public boolean hasType() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type getType() {
return type_;
}
private void initFields() {
name_ = "";
table_ = "";
creationTime_ = 0L;
type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.TIMESTAMP;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasName()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getNameBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getTableBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeInt64(3, creationTime_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeEnum(4, type_.getNumber());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getNameBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getTableBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(3, creationTime_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(4, type_.getNumber());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription) obj;
boolean result = true;
result = result && (hasName() == other.hasName());
if (hasName()) {
result = result && getName()
.equals(other.getName());
}
result = result && (hasTable() == other.hasTable());
if (hasTable()) {
result = result && getTable()
.equals(other.getTable());
}
result = result && (hasCreationTime() == other.hasCreationTime());
if (hasCreationTime()) {
result = result && (getCreationTime()
== other.getCreationTime());
}
result = result && (hasType() == other.hasType());
if (hasType()) {
result = result &&
(getType() == other.getType());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasName()) {
hash = (37 * hash) + NAME_FIELD_NUMBER;
hash = (53 * hash) + getName().hashCode();
}
if (hasTable()) {
hash = (37 * hash) + TABLE_FIELD_NUMBER;
hash = (53 * hash) + getTable().hashCode();
}
if (hasCreationTime()) {
hash = (37 * hash) + CREATIONTIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCreationTime());
}
if (hasType()) {
hash = (37 * hash) + TYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getType());
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_SnapshotDescription_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_SnapshotDescription_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
name_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
table_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
creationTime_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.TIMESTAMP;
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDescriptor();
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription build() {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.name_ = name_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.table_ = table_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.creationTime_ = creationTime_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.type_ = type_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance()) return this;
if (other.hasName()) {
setName(other.getName());
}
if (other.hasTable()) {
setTable(other.getTable());
}
if (other.hasCreationTime()) {
setCreationTime(other.getCreationTime());
}
if (other.hasType()) {
setType(other.getType());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasName()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
name_ = input.readBytes();
break;
}
case 18: {
bitField0_ |= 0x00000002;
table_ = input.readBytes();
break;
}
case 24: {
bitField0_ |= 0x00000004;
creationTime_ = input.readInt64();
break;
}
case 32: {
int rawValue = input.readEnum();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(4, rawValue);
} else {
bitField0_ |= 0x00000008;
type_ = value;
}
break;
}
}
}
}
private int bitField0_;
// required string name = 1;
private java.lang.Object name_ = "";
public boolean hasName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public String getName() {
java.lang.Object ref = name_;
if (!(ref instanceof String)) {
String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
name_ = s;
return s;
} else {
return (String) ref;
}
}
public Builder setName(String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
name_ = value;
onChanged();
return this;
}
public Builder clearName() {
bitField0_ = (bitField0_ & ~0x00000001);
name_ = getDefaultInstance().getName();
onChanged();
return this;
}
void setName(com.google.protobuf.ByteString value) {
bitField0_ |= 0x00000001;
name_ = value;
onChanged();
}
// optional string table = 2;
private java.lang.Object table_ = "";
public boolean hasTable() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public String getTable() {
java.lang.Object ref = table_;
if (!(ref instanceof String)) {
String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
table_ = s;
return s;
} else {
return (String) ref;
}
}
public Builder setTable(String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
table_ = value;
onChanged();
return this;
}
public Builder clearTable() {
bitField0_ = (bitField0_ & ~0x00000002);
table_ = getDefaultInstance().getTable();
onChanged();
return this;
}
void setTable(com.google.protobuf.ByteString value) {
bitField0_ |= 0x00000002;
table_ = value;
onChanged();
}
// optional int64 creationTime = 3 [default = 0];
private long creationTime_ ;
public boolean hasCreationTime() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public long getCreationTime() {
return creationTime_;
}
public Builder setCreationTime(long value) {
bitField0_ |= 0x00000004;
creationTime_ = value;
onChanged();
return this;
}
public Builder clearCreationTime() {
bitField0_ = (bitField0_ & ~0x00000004);
creationTime_ = 0L;
onChanged();
return this;
}
// optional .SnapshotDescription.Type type = 4 [default = TIMESTAMP];
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.TIMESTAMP;
public boolean hasType() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type getType() {
return type_;
}
public Builder setType(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
type_ = value;
onChanged();
return this;
}
public Builder clearType() {
bitField0_ = (bitField0_ & ~0x00000008);
type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.TIMESTAMP;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:SnapshotDescription)
}
static {
defaultInstance = new SnapshotDescription(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:SnapshotDescription)
}
private static com.google.protobuf.Descriptors.Descriptor
internal_static_TableSchema_descriptor;
private static
@@ -11388,6 +12117,11 @@ public final class HBaseProtos {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_NameInt64Pair_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_SnapshotDescription_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_SnapshotDescription_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
@@ -11440,14 +12174,18 @@ public final class HBaseProtos {
"value\030\002 \002(\t\",\n\rNameBytesPair\022\014\n\004name\030\001 \002" +
"(\t\022\r\n\005value\030\002 \001(\014\"/\n\016BytesBytesPair\022\r\n\005f" +
"irst\030\001 \002(\014\022\016\n\006second\030\002 \002(\014\",\n\rNameInt64P" +
"air\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030\002 \001(\003*r\n\013Comp" +
"areType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005" +
"EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_EQU" +
"AL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*_\n\007KeyType\022" +
"\013\n\007MINIMUM\020\000\022\007\n\003PUT\020\004\022\n\n\006DELETE\020\010\022\021\n\rDEL" +
"ETE_COLUMN\020\014\022\021\n\rDELETE_FAMILY\020\016\022\014\n\007MAXIM" +
"UM\020\377\001B>\n*org.apache.hadoop.hbase.protobu",
"f.generatedB\013HBaseProtosH\001\240\001\001"
"air\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030\002 \001(\003\"\242\001\n\023Sna" +
"pshotDescription\022\014\n\004name\030\001 \002(\t\022\r\n\005table\030" +
"\002 \001(\t\022\027\n\014creationTime\030\003 \001(\003:\0010\0222\n\004type\030\004" +
" \001(\0162\031.SnapshotDescription.Type:\tTIMESTA" +
"MP\"!\n\004Type\022\r\n\tTIMESTAMP\020\000\022\n\n\006GLOBAL\020\001*r\n" +
"\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020" +
"\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_O",
"R_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*_\n\007Key" +
"Type\022\013\n\007MINIMUM\020\000\022\007\n\003PUT\020\004\022\n\n\006DELETE\020\010\022\021" +
"\n\rDELETE_COLUMN\020\014\022\021\n\rDELETE_FAMILY\020\016\022\014\n\007" +
"MAXIMUM\020\377\001B>\n*org.apache.hadoop.hbase.pr" +
"otobuf.generatedB\013HBaseProtosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -11590,6 +12328,14 @@ public final class HBaseProtos {
new java.lang.String[] { "Name", "Value", },
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.Builder.class);
internal_static_SnapshotDescription_descriptor =
getDescriptor().getMessageTypes().get(15);
internal_static_SnapshotDescription_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SnapshotDescription_descriptor,
new java.lang.String[] { "Name", "Table", "CreationTime", "Type", },
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder.class);
return null;
}
};

View File

@@ -177,6 +177,40 @@ message IsCatalogJanitorEnabledResponse {
required bool value = 1;
}
message TakeSnapshotRequest {
required SnapshotDescription snapshot = 1;
}
message TakeSnapshotResponse {
required int64 expectedTime = 1;
}
message ListSnapshotRequest {
}
message ListSnapshotResponse {
repeated SnapshotDescription snapshots = 1;
}
message DeleteSnapshotRequest {
required SnapshotDescription snapshot = 1;
}
message DeleteSnapshotResponse {
}
/* If you don't send a snapshot in the request, the most recent snapshot is
 * returned in the response (if it is done) so you can inspect it.
 */
message IsSnapshotDoneRequest {
optional SnapshotDescription snapshot = 1;
}
message IsSnapshotDoneResponse {
optional bool done = 1 [default = false];
optional SnapshotDescription snapshot = 2;
}
service MasterAdminService {
/** Adds a column to the specified table. */
rpc addColumn(AddColumnRequest)
@@ -280,4 +314,27 @@ service MasterAdminService {
*/
rpc execMasterService(CoprocessorServiceRequest)
returns(CoprocessorServiceResponse);
/**
* Create a snapshot for the given table.
* @param snapshot description of the snapshot to take
*/
rpc snapshot(TakeSnapshotRequest) returns(TakeSnapshotResponse);
/**
* List existing snapshots.
* @return a list of snapshot descriptors
*/
rpc listSnapshots(ListSnapshotRequest) returns(ListSnapshotResponse);
/**
* Delete an existing snapshot. This method can also be used to clean up an aborted snapshot.
* @param snapshotName snapshot to delete
*/
rpc deleteSnapshot(DeleteSnapshotRequest) returns(DeleteSnapshotResponse);
/**
* Determine if the snapshot is done yet.
*/
rpc isSnapshotDone(IsSnapshotDoneRequest) returns(IsSnapshotDoneResponse);
}
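To illustrate the comment on IsSnapshotDoneRequest above: the snapshot field may be omitted, in which case the master reports on its most recent snapshot and echoes the description back. A hedged sketch against the generated classes, assuming masterAdmin is an already-connected MasterAdminProtocol stub:

// Types come from the generated MasterAdminProtos / HBaseProtos classes.
// Ask about the most recent snapshot without naming one.
IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder().build();
IsSnapshotDoneResponse response = masterAdmin.isSnapshotDone(null, request);
if (response.getDone()) {
  // The server returns the description of the snapshot it completed.
  SnapshotDescription finished = response.getSnapshot();
  System.out.println("Finished snapshot: " + finished.getName());
}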

View File

@@ -268,3 +268,17 @@ message NameInt64Pair {
optional string name = 1;
optional int64 value = 2;
}
/**
* Description of the snapshot to take
*/
message SnapshotDescription {
required string name = 1;
optional string table = 2; // not needed for delete, but checked when taking a snapshot
optional int64 creationTime = 3 [default = 0];
enum Type {
TIMESTAMP = 0;
GLOBAL = 1;
}
optional Type type = 4 [default = TIMESTAMP];
}
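For reference, this message would be constructed through the generated Java builder like so (the names are hypothetical; creationTime and type fall back to their declared defaults when unset):

SnapshotDescription desc = SnapshotDescription.newBuilder()
    .setName("mySnapshot")                     // required
    .setTable("myTable")                       // optional; checked when taking a snapshot
    .setType(SnapshotDescription.Type.GLOBAL)  // defaults to TIMESTAMP if unset
    .build();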

View File

@@ -18,21 +18,25 @@
package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest;
@@ -43,6 +47,11 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableR
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest;
@@ -53,17 +62,19 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegio
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
import org.apache.hadoop.hbase.security.KerberosInfo;
import org.apache.hadoop.hbase.security.TokenInfo;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@@ -346,4 +357,54 @@ public interface MasterAdminProtocol extends
@Override
public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c,
IsCatalogJanitorEnabledRequest req) throws ServiceException;
/**
* Create a snapshot for the given table.
* @param controller Unused (set to null).
* @param snapshot description of the snapshot to take
* @return empty response on success
* @throws ServiceException if the snapshot cannot be taken
*/
@Override
public TakeSnapshotResponse snapshot(RpcController controller, TakeSnapshotRequest snapshot)
throws ServiceException;
/**
* List existing snapshots.
* @param controller Unused (set to null).
* @param request information about the request (can be empty)
* @return {@link ListSnapshotResponse} - a list of {@link SnapshotDescription}
* @throws ServiceException if we cannot reach the filesystem
*/
@Override
public ListSnapshotResponse listSnapshots(RpcController controller, ListSnapshotRequest request)
throws ServiceException;
/**
* Delete an existing snapshot. This method can also be used to clean up an aborted snapshot.
* @param controller Unused (set to null).
* @param snapshotName snapshot to delete
* @return an empty {@link DeleteSnapshotResponse} on success
* @throws ServiceException if the filesystem cannot be reached
*/
@Override
public DeleteSnapshotResponse deleteSnapshot(RpcController controller,
DeleteSnapshotRequest snapshotName) throws ServiceException;
/**
* Check to see if the snapshot is done.
* @param controller Unused (set to null).
* @param request name of the snapshot to check.
* @throws ServiceException around possible exceptions:
* <ol>
* <li>{@link UnknownSnapshotException} if the passed snapshot name doesn't match the
* current snapshot <i>or</i> there is no previous snapshot.</li>
* <li>{@link SnapshotCreationException} if the snapshot couldn't complete because of
* errors</li>
* </ol>
*/
@Override
public IsSnapshotDoneResponse isSnapshotDone(RpcController controller,
IsSnapshotDoneRequest request) throws ServiceException;
}

View File

@@ -74,20 +74,28 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRespo
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest;
@@ -95,8 +103,13 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaA
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.StringUtils;
@@ -2084,6 +2097,261 @@ public class HBaseAdmin implements Abortable, Closeable {
return state;
}
/**
* Create a timestamp-consistent snapshot for the given table.
* <p>
* Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
* snapshot with the same name (even a different type or with different parameters) will fail with
* a {@link SnapshotCreationException} indicating the duplicate naming.
* <p>
* Snapshot names follow the same naming constraints as tables in HBase. See
* {@link HTableDescriptor#isLegalTableName(byte[])}.
* @param snapshotName name of the snapshot to be created
* @param tableName name of the table for which snapshot is created
* @throws IOException if a remote or network exception occurs
* @throws SnapshotCreationException if snapshot creation failed
* @throws IllegalArgumentException if the snapshot request is formatted incorrectly
*/
public void snapshot(final String snapshotName, final String tableName) throws IOException,
SnapshotCreationException, IllegalArgumentException {
snapshot(snapshotName, tableName, SnapshotDescription.Type.TIMESTAMP);
}
/**
* Create a timestamp-consistent snapshot for the given table.
* <p>
* Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
* snapshot with the same name (even a different type or with different parameters) will fail with
* a {@link SnapshotCreationException} indicating the duplicate naming.
* <p>
* Snapshot names follow the same naming constraints as tables in HBase. See
* {@link HTableDescriptor#isLegalTableName(byte[])}.
* @param snapshotName name of the snapshot to be created
* @param tableName name of the table for which snapshot is created
* @throws IOException if a remote or network exception occurs
* @throws SnapshotCreationException if snapshot creation failed
* @throws IllegalArgumentException if the snapshot request is formatted incorrectly
*/
public void snapshot(final byte[] snapshotName, final byte[] tableName) throws IOException,
SnapshotCreationException, IllegalArgumentException {
snapshot(Bytes.toString(snapshotName), Bytes.toString(tableName));
}
/**
* Create typed snapshot of the table.
* <p>
* Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
* snapshot with the same name (even a different type or with different parameters) will fail with
* a {@link SnapshotCreationException} indicating the duplicate naming.
* <p>
* Snapshot names follow the same naming constraints as tables in HBase. See
* {@link HTableDescriptor#isLegalTableName(byte[])}.
* <p>
* Generally, you should <b>not</b> use this, but instead just take a {@link Type#TIMESTAMP
* Timestamp-consistent Snapshot} with {@link #snapshot(byte[], byte[])} or
* {@link #snapshot(String, String)}, which creates a timestamp-based snapshot, causing minimal
* interference with the running cluster.
* <p>
* However, this method can be used to launch a {@link Type#GLOBAL GlobalSnapshot}. Note that a
* {@link Type#GLOBAL GlobalSnapshot} will <b>block all writes to the table</b> while taking the
* snapshot. This occurs so a single stable state can be achieved across all servers hosting the
* table - this is beyond the consistency constraints placed on an HBase table. This type of
* snapshot has two main implications:
* <ul>
* <li>all writes to the table will block while taking the snapshot</li>
* <li>the probability of success decreases with increasing cluster size, so it is not recommended
* for clusters much greater than 500 nodes</li>
* </ul>
* Together, these two considerations mean that on a cluster under any real load you will likely
* need multiple attempts to get a snapshot, and a large cluster will suffer notable performance
* degradation.
* <p>
* This can be suitable for a smaller cluster, but comes with the above caveats - user beware (you
* should really consider whether you can get by with just timestamp-consistent snapshots via
* {@link #snapshot(byte[], byte[])} or {@link #snapshot(String, String)}).
* @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other
* snapshots stored on the cluster
* @param tableName name of the table to snapshot
* @param type type of snapshot to take
* @throws IOException if we fail to reach the master
* @throws SnapshotCreationException if snapshot creation failed
* @throws IllegalArgumentException if the snapshot request is formatted incorrectly
*/
public void snapshot(final String snapshotName, final String tableName,
SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
IllegalArgumentException {
SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
builder.setTable(tableName);
builder.setName(snapshotName);
builder.setType(type);
snapshot(builder.build());
}
/**
* Take a snapshot and wait for the server to complete that snapshot (blocking).
* <p>
* Only a single snapshot should be taken at a time for an instance of HBase, or results may be
* undefined (you can tell multiple HBase clusters to snapshot at the same time, but only one at a
* time for a single cluster).
* <p>
* Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
* snapshot with the same name (even a different type or with different parameters) will fail with
* a {@link SnapshotCreationException} indicating the duplicate naming.
* <p>
* Snapshot names follow the same naming constraints as tables in HBase. See
* {@link HTableDescriptor#isLegalTableName(byte[])}.
* <p>
* You should probably use {@link #snapshot(String, String)} or {@link #snapshot(byte[], byte[])}
* unless you are sure about the type of snapshot that you want to take.
* @param snapshot snapshot to take
* @throws IOException if we lose contact with the master
* @throws SnapshotCreationException if snapshot failed to be taken
* @throws IllegalArgumentException if the snapshot request is formatted incorrectly
*/
public void snapshot(SnapshotDescription snapshot) throws IOException, SnapshotCreationException,
IllegalArgumentException {
// make sure the snapshot is valid
SnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
// actually take the snapshot
TakeSnapshotResponse response = takeSnapshotAsync(snapshot);
final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot)
.build();
IsSnapshotDoneResponse done = IsSnapshotDoneResponse.newBuilder().buildPartial();
long start = EnvironmentEdgeManager.currentTimeMillis();
long max = response.getExpectedTime();
long maxPauseTime = max / this.numRetries;
int tries = 0;
LOG.debug("Waiting a max of " + max + " ms for snapshot to complete. (max " + maxPauseTime
+ " ms per retry)");
while ((EnvironmentEdgeManager.currentTimeMillis() - start) < max && !done.getDone()) {
try {
// sleep a backoff <= pauseTime amount
long sleep = getPauseTime(tries++);
LOG.debug("Found sleep:" + sleep);
sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
LOG.debug(tries + ") Sleeping: " + sleep + " ms while we wait for snapshot to complete.");
Thread.sleep(sleep);
} catch (InterruptedException e) {
LOG.debug("Interrupted while waiting for snapshot " + snapshot + " to complete");
Thread.currentThread().interrupt();
}
LOG.debug("Getting current status of snasphot from master...");
done = execute(new MasterAdminCallable<IsSnapshotDoneResponse>() {
@Override
public IsSnapshotDoneResponse call() throws ServiceException {
return masterAdmin.isSnapshotDone(null, request);
}
});
}
if (!done.getDone()) {
throw new SnapshotCreationException("Snapshot '" + snapshot.getName()
+ "' wasn't completed in expectedTime:" + max + " ms");
}
}
/**
* Take a snapshot, without waiting for the server to complete it (asynchronous).
* <p>
* Only a single snapshot should be taken at a time, or results may be undefined.
* @param snapshot snapshot to take
* @return response from the server indicating the max time to wait for the snapshot
* @throws IOException if the snapshot did not succeed or we lose contact with the master.
* @throws SnapshotCreationException if snapshot creation failed
* @throws IllegalArgumentException if the snapshot request is formatted incorrectly
*/
public TakeSnapshotResponse takeSnapshotAsync(SnapshotDescription snapshot) throws IOException,
SnapshotCreationException {
SnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
final TakeSnapshotRequest request = TakeSnapshotRequest.newBuilder().setSnapshot(snapshot)
.build();
// run the snapshot on the master
return execute(new MasterAdminCallable<TakeSnapshotResponse>() {
@Override
public TakeSnapshotResponse call() throws ServiceException {
return masterAdmin.snapshot(null, request);
}
});
}
/**
* Check the current state of the passed snapshot.
* <p>
* There are three possible states:
* <ol>
* <li>running - returns <tt>false</tt></li>
* <li>finished - returns <tt>true</tt></li>
* <li>finished with error - throws the exception that caused the snapshot to fail</li>
* </ol>
* <p>
* The cluster only knows about the most recent snapshot. Therefore, if another snapshot has been
* run/started since the snapshot you are checking, you will receive an
* {@link UnknownSnapshotException}.
* @param snapshot description of the snapshot to check
* @return <tt>true</tt> if the snapshot is completed, <tt>false</tt> if the snapshot is still
* running
* @throws IOException if we have a network issue
* @throws HBaseSnapshotException if the snapshot failed
* @throws UnknownSnapshotException if the requested snapshot is unknown
*/
public boolean isSnapshotFinished(final SnapshotDescription snapshot)
throws IOException, HBaseSnapshotException, UnknownSnapshotException {
return execute(new MasterAdminCallable<IsSnapshotDoneResponse>() {
@Override
public IsSnapshotDoneResponse call() throws ServiceException {
return masterAdmin.isSnapshotDone(null,
IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build());
}
}).getDone();
}
/**
* List existing snapshots.
* @return a list of snapshot descriptors for existing snapshots
* @throws IOException if a network error occurs
*/
public List<SnapshotDescription> listSnapshots() throws IOException {
return execute(new MasterAdminCallable<List<SnapshotDescription>>() {
@Override
public List<SnapshotDescription> call() throws ServiceException {
return masterAdmin.listSnapshots(null, ListSnapshotRequest.newBuilder().build())
.getSnapshotsList();
}
});
}
/**
* Delete an existing snapshot.
* @param snapshotName name of the snapshot
* @throws IOException if a remote or network exception occurs
*/
public void deleteSnapshot(final byte[] snapshotName) throws IOException {
deleteSnapshot(Bytes.toString(snapshotName));
}
/**
* Delete an existing snapshot.
* @param snapshotName name of the snapshot
* @throws IOException if a remote or network exception occurs
*/
public void deleteSnapshot(final String snapshotName) throws IOException {
// make sure the snapshot name is valid - snapshot names follow table naming rules
HTableDescriptor.isLegalTableName(Bytes.toBytes(snapshotName));
// do the delete
execute(new MasterAdminCallable<Void>() {
@Override
public Void call() throws ServiceException {
masterAdmin.deleteSnapshot(
null,
DeleteSnapshotRequest.newBuilder()
.setSnapshot(SnapshotDescription.newBuilder().setName(snapshotName).build()).build());
return null;
}
});
}
/**
* @see {@link #execute(MasterAdminCallable<V>)}
*/
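Putting the client pieces above together, a hedged sketch of driving a snapshot yourself via the asynchronous variant; admin is assumed to be an HBaseAdmin and desc a SnapshotDescription built as shown earlier, with error handling elided.

// Fire the snapshot request, then poll at our own cadence.
TakeSnapshotResponse response = admin.takeSnapshotAsync(desc);
long deadline = System.currentTimeMillis() + response.getExpectedTime();
while (System.currentTimeMillis() < deadline) {
  // isSnapshotFinished throws if the snapshot failed on the server.
  if (admin.isSnapshotFinished(desc)) {
    break;
  }
  Thread.sleep(500);
}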

View File

@@ -125,6 +125,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableR
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest;
@@ -135,6 +137,10 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableR
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest;
@@ -149,6 +155,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequ
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest;
@@ -170,6 +178,7 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Repor
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.replication.regionserver.Replication;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.trace.SpanReceiverHost;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CompressionTest;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
@@ -2399,4 +2408,32 @@ Server {
public HFileCleaner getHFileCleaner() {
return this.hfileCleaner;
}
@Override
public TakeSnapshotResponse snapshot(RpcController controller, TakeSnapshotRequest request)
throws ServiceException {
throw new ServiceException(new UnsupportedOperationException(
"Snapshots are not implemented yet."));
}
@Override
public ListSnapshotResponse listSnapshots(RpcController controller, ListSnapshotRequest request)
throws ServiceException {
throw new ServiceException(new UnsupportedOperationException(
"Snapshots are not implemented yet."));
}
@Override
public DeleteSnapshotResponse deleteSnapshot(RpcController controller,
DeleteSnapshotRequest request) throws ServiceException {
throw new ServiceException(new UnsupportedOperationException(
"Snapshots are not implemented yet."));
}
@Override
public IsSnapshotDoneResponse isSnapshotDone(RpcController controller,
IsSnapshotDoneRequest request) throws ServiceException {
throw new ServiceException(new UnsupportedOperationException(
"Snapshots are not implemented yet."));
}
}

View File

@@ -0,0 +1,61 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.snapshot;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
/**
* General exception when a snapshot fails.
*/
@SuppressWarnings("serial")
public class HBaseSnapshotException extends HBaseIOException {
private SnapshotDescription description;
public HBaseSnapshotException(String msg) {
super(msg);
}
public HBaseSnapshotException(String msg, Throwable cause) {
super(msg, cause);
}
public HBaseSnapshotException(Throwable cause) {
super(cause);
}
public HBaseSnapshotException(String msg, SnapshotDescription desc) {
super(msg);
this.description = desc;
}
public HBaseSnapshotException(Throwable cause, SnapshotDescription desc) {
super(cause);
this.description = desc;
}
public HBaseSnapshotException(String msg, Throwable cause, SnapshotDescription desc) {
super(msg, cause);
this.description = desc;
}
public SnapshotDescription getSnapshotDescription() {
return this.description;
}
}

View File

@@ -0,0 +1,51 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.snapshot;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
/**
* Thrown when a snapshot could not be created due to a server-side error when taking the snapshot.
*/
@SuppressWarnings("serial")
public class SnapshotCreationException extends HBaseSnapshotException {
public SnapshotCreationException(String msg, SnapshotDescription desc) {
super(msg, desc);
}
public SnapshotCreationException(String msg, Throwable cause, SnapshotDescription desc) {
super(msg, cause, desc);
}
public SnapshotCreationException(String msg, Throwable cause) {
super(msg, cause);
}
public SnapshotCreationException(String msg) {
super(msg);
}
public SnapshotCreationException(Throwable cause, SnapshotDescription desc) {
super(cause, desc);
}
public SnapshotCreationException(Throwable cause) {
super(cause);
}
}

View File

@@ -0,0 +1,47 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.snapshot;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.util.Bytes;
/**
* Utility class to help manage {@link SnapshotDescription SnapshotDescriptions}.
*/
public class SnapshotDescriptionUtils {
private SnapshotDescriptionUtils() {
// private constructor for utility class
}
/**
* Check that the requested snapshot description is valid
* @param snapshot description of the snapshot
* @throws IllegalArgumentException if the name of the snapshot or the name of the table to
*           snapshot is not a valid name
*/
public static void assertSnapshotRequestIsValid(SnapshotDescription snapshot)
throws IllegalArgumentException {
// FIXME these method names are really bad - trunk will probably change
// make sure the snapshot name is valid
HTableDescriptor.isLegalTableName(Bytes.toBytes(snapshot.getName()));
// make sure the table name is valid
HTableDescriptor.isLegalTableName(Bytes.toBytes(snapshot.getTable()));
}
}
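A short usage sketch, assuming placeholder names; per the client test later in this commit, names beginning with '.' or '-' (or containing spaces or characters like '$') are rejected.

import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;

public class ValidateSnapshotExample {
  public static void main(String[] args) {
    SnapshotDescription ok = SnapshotDescription.newBuilder()
        .setName("example_snapshot").setTable("example_table").build();
    SnapshotDescriptionUtils.assertSnapshotRequestIsValid(ok); // passes silently
    SnapshotDescription bad = SnapshotDescription.newBuilder()
        .setName(".example").setTable("example_table").build();
    try {
      SnapshotDescriptionUtils.assertSnapshotRequestIsValid(bad);
    } catch (IllegalArgumentException e) {
      // a leading '.' is rejected by HTableDescriptor.isLegalTableName
      System.err.println("rejected: " + e.getMessage());
    }
  }
}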

View File

@@ -0,0 +1,29 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.snapshot;
/**
* Exception thrown when we receive an error about a snapshot that we don't recognize.
*/
@SuppressWarnings("serial")
public class UnknownSnapshotException extends SnapshotCreationException {
public UnknownSnapshotException(String msg) {
super(msg);
}
}

View File

@@ -0,0 +1,143 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
import com.google.protobuf.RpcController;
/**
* Test snapshot logic from the client
*/
@Category(SmallTests.class)
public class TestSnapshotsFromAdmin {
private static final Log LOG = LogFactory.getLog(TestSnapshotsFromAdmin.class);
/**
* Test that the back-off logic, which combines exponential increase with the max wait time
* passed back from the server, produces the correct overall wait for the snapshot to finish.
* @throws Exception
*/
@Test(timeout = 10000)
public void testBackoffLogic() throws Exception {
final int maxWaitTime = 7500;
final int numRetries = 10;
final int pauseTime = 500;
// calculate the wait time, if we just do straight backoff (ignoring the expected time from
// master)
long ignoreExpectedTime = 0;
for (int i = 0; i < 6; i++) {
ignoreExpectedTime += HConstants.RETRY_BACKOFF[i] * pauseTime;
}
// the correct wait time, capping at the maxTime/tries + fudge room
final long time = pauseTime * 3 + ((maxWaitTime / numRetries) * 3) + 300;
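// with the constants above this caps at 500 * 3 + (7500 / 10) * 3 + 300 = 1500 + 2250 + 300 = 4050 ms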
assertTrue("Capped snapshot wait time isn't less that the uncapped backoff time "
+ "- further testing won't prove anything.", time < ignoreExpectedTime);
// setup the mocks
HConnectionManager.HConnectionImplementation mockConnection = Mockito
.mock(HConnectionManager.HConnectionImplementation.class);
Configuration conf = HBaseConfiguration.create();
// setup the conf to match the expected properties
conf.setInt("hbase.client.retries.number", numRetries);
conf.setLong("hbase.client.pause", pauseTime);
// wire the connection's master admin up to our mock
MasterAdminKeepAliveConnection mockMaster = Mockito.mock(MasterAdminKeepAliveConnection.class);
Mockito.when(mockConnection.getConfiguration()).thenReturn(conf);
Mockito.when(mockConnection.getKeepAliveMasterAdmin()).thenReturn(mockMaster);
// set the max wait time for the snapshot to complete
TakeSnapshotResponse response = TakeSnapshotResponse.newBuilder().setExpectedTime(maxWaitTime)
.build();
Mockito
.when(
mockMaster.snapshot((RpcController) Mockito.isNull(),
Mockito.any(TakeSnapshotRequest.class))).thenReturn(response);
// setup the response
IsSnapshotDoneResponse.Builder builder = IsSnapshotDoneResponse.newBuilder();
builder.setDone(false);
// the first five calls report not-done; the last reports success
Mockito.when(
mockMaster.isSnapshotDone((RpcController) Mockito.isNull(),
Mockito.any(IsSnapshotDoneRequest.class))).thenReturn(builder.build(), builder.build(),
builder.build(), builder.build(), builder.build(), builder.setDone(true).build());
// setup the admin and run the test
HBaseAdmin admin = new HBaseAdmin(mockConnection);
String snapshot = "snapshot";
String table = "table";
// get start time
long start = System.currentTimeMillis();
admin.snapshot(snapshot, table);
long finish = System.currentTimeMillis();
long elapsed = (finish - start);
assertTrue("Elapsed time:" + elapsed + " is more than expected max:" + time, elapsed <= time);
}
/**
* Make sure that we validate the snapshot name and the table name before we pass anything across
* the wire
* @throws IOException on failure
*/
@Test
public void testValidateSnapshotName() throws IOException {
HConnectionManager.HConnectionImplementation mockConnection = Mockito
.mock(HConnectionManager.HConnectionImplementation.class);
Configuration conf = HBaseConfiguration.create();
Mockito.when(mockConnection.getConfiguration()).thenReturn(conf);
HBaseAdmin admin = new HBaseAdmin(mockConnection);
SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
// check that invalid snapshot names fail
failSnapshotStart(admin, builder.setName(".snapshot").build());
failSnapshotStart(admin, builder.setName("-snapshot").build());
failSnapshotStart(admin, builder.setName("snapshot fails").build());
failSnapshotStart(admin, builder.setName("snap$hot").build());
// check that the table name also gets verified
failSnapshotStart(admin, builder.setName("snapshot").setTable(".table").build());
failSnapshotStart(admin, builder.setName("snapshot").setTable("-table").build());
failSnapshotStart(admin, builder.setName("snapshot").setTable("table fails").build());
failSnapshotStart(admin, builder.setName("snapshot").setTable("tab%le").build());
}
private void failSnapshotStart(HBaseAdmin admin, SnapshotDescription snapshot) throws IOException {
try {
admin.snapshot(snapshot);
fail("Snapshot should not have succeed with name:" + snapshot.getName());
} catch (IllegalArgumentException e) {
LOG.debug("Correctly failed to start snapshot:" + e.getMessage());
}
}
}