HBASE-14355 Scan different TimeRange for each column family (Churro Morales)

stack 2015-11-12 10:54:16 -10:00
parent b677f2e65d
commit 290ecbe829
18 changed files with 1826 additions and 267 deletions

View File

@@ -125,6 +125,10 @@ public class Get extends Query
for (Map.Entry<String, byte[]> attr : get.getAttributesMap().entrySet()) {
setAttribute(attr.getKey(), attr.getValue());
}
for (Map.Entry<byte[], TimeRange> entry : get.getColumnFamilyTimeRange().entrySet()) {
TimeRange tr = entry.getValue();
setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
}
}
public boolean isCheckExistenceOnly() {
@@ -195,11 +199,10 @@ public class Get extends Query
* [minStamp, maxStamp).
* @param minStamp minimum timestamp value, inclusive
* @param maxStamp maximum timestamp value, exclusive
* @throws IOException if invalid time range
* @throws IOException
* @return this for invocation chaining
*/
public Get setTimeRange(long minStamp, long maxStamp)
throws IOException {
public Get setTimeRange(long minStamp, long maxStamp) throws IOException {
tr = new TimeRange(minStamp, maxStamp);
return this;
}
@@ -213,7 +216,7 @@ public class Get extends Query
throws IOException {
try {
tr = new TimeRange(timestamp, timestamp+1);
} catch(IOException e) {
} catch(Exception e) {
// This should never happen, unless integer overflow or something extremely wrong...
LOG.error("TimeRange failed, likely caused by integer overflow. ", e);
throw e;
@@ -221,6 +224,10 @@ public class Get extends Query
return this;
}
@Override public Get setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
return (Get) super.setColumnFamilyTimeRange(cf, minStamp, maxStamp);
}
/**
* Get all available versions.
* @return this for invocation chaining

View File

@@ -19,10 +19,12 @@ package org.apache.hadoop.hbase.client;
import java.util.Map;
import com.google.common.collect.Maps;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.security.access.AccessControlConstants;
import org.apache.hadoop.hbase.security.access.Permission;
@@ -31,6 +33,7 @@ import org.apache.hadoop.hbase.security.visibility.VisibilityConstants;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ListMultimap;
import org.apache.hadoop.hbase.util.Bytes;
@InterfaceAudience.Public
@InterfaceStability.Evolving
@@ -39,6 +42,7 @@ public abstract class Query extends OperationWithAttributes {
protected Filter filter = null;
protected int targetReplicaId = -1;
protected Consistency consistency = Consistency.STRONG;
protected Map<byte[], TimeRange> colFamTimeRangeMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
/**
* @return Filter
@@ -173,4 +177,32 @@ public abstract class Query extends OperationWithAttributes {
return attr == null ? IsolationLevel.READ_COMMITTED :
IsolationLevel.fromBytes(attr);
}
/**
* Get versions of columns only within the specified timestamp range,
* [minStamp, maxStamp) on a per CF basis. Note, default maximum versions to return is 1. If
* your time range spans more than one version and you want all versions
* returned, up the number of versions beyond the default.
* Column Family time ranges take precedence over the global time range.
*
* @param cf the column family for which to restrict the time range
* @param minStamp minimum timestamp value, inclusive
* @param maxStamp maximum timestamp value, exclusive
* @return this
*/
public Query setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
colFamTimeRangeMap.put(cf, new TimeRange(minStamp, maxStamp));
return this;
}
/**
* @return Map<byte[], TimeRange> a map of column families to time ranges
*/
public Map<byte[], TimeRange> getColumnFamilyTimeRange() {
return this.colFamTimeRangeMap;
}
}
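A family without an entry in the map keeps using the scan-wide time range, so the per-CF and global APIs compose. A minimal usage sketch, assuming an open Table handle named table and an illustrative family name (neither is part of the patch):

// Restrict family "meta" to [0, 50) while other families use the global [0, 100).
Scan scan = new Scan();
scan.setTimeRange(0L, 100L);
scan.setColumnFamilyTimeRange(Bytes.toBytes("meta"), 0L, 50L);
ResultScanner results = table.getScanner(scan);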

View File

@@ -248,6 +248,10 @@ public class Scan extends Query {
for (Map.Entry<String, byte[]> attr : scan.getAttributesMap().entrySet()) {
setAttribute(attr.getKey(), attr.getValue());
}
for (Map.Entry<byte[], TimeRange> entry : scan.getColumnFamilyTimeRange().entrySet()) {
TimeRange tr = entry.getValue();
setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
}
}
/**
@@ -270,6 +274,10 @@ public class Scan extends Query {
for (Map.Entry<String, byte[]> attr : get.getAttributesMap().entrySet()) {
setAttribute(attr.getKey(), attr.getValue());
}
for (Map.Entry<byte[], TimeRange> entry : get.getColumnFamilyTimeRange().entrySet()) {
TimeRange tr = entry.getValue();
setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
}
}
public boolean isGetScan() {
@@ -321,13 +329,11 @@ public class Scan extends Query {
* returned, up the number of versions beyond the default.
* @param minStamp minimum timestamp value, inclusive
* @param maxStamp maximum timestamp value, exclusive
* @throws IOException if invalid time range
* @see #setMaxVersions()
* @see #setMaxVersions(int)
* @return this
*/
public Scan setTimeRange(long minStamp, long maxStamp)
throws IOException {
public Scan setTimeRange(long minStamp, long maxStamp) throws IOException {
tr = new TimeRange(minStamp, maxStamp);
return this;
}
@@ -346,7 +352,7 @@ public class Scan extends Query {
throws IOException {
try {
tr = new TimeRange(timestamp, timestamp+1);
} catch(IOException e) {
} catch(Exception e) {
// This should never happen, unless integer overflow or something extremely wrong...
LOG.error("TimeRange failed, likely caused by integer overflow. ", e);
throw e;
@@ -354,6 +360,10 @@ public class Scan extends Query {
return this;
}
@Override public Scan setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
return (Scan) super.setColumnFamilyTimeRange(cf, minStamp, maxStamp);
}
/**
* Set the start row of the scan.
* <p>

View File

@@ -458,17 +458,16 @@ public final class ProtobufUtil {
if (proto.hasStoreOffset()) {
get.setRowOffsetPerColumnFamily(proto.getStoreOffset());
}
if (proto.getCfTimeRangeCount() > 0) {
for (HBaseProtos.ColumnFamilyTimeRange cftr : proto.getCfTimeRangeList()) {
TimeRange timeRange = protoToTimeRange(cftr.getTimeRange());
get.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(),
timeRange.getMin(), timeRange.getMax());
}
}
if (proto.hasTimeRange()) {
HBaseProtos.TimeRange timeRange = proto.getTimeRange();
long minStamp = 0;
long maxStamp = Long.MAX_VALUE;
if (timeRange.hasFrom()) {
minStamp = timeRange.getFrom();
}
if (timeRange.hasTo()) {
maxStamp = timeRange.getTo();
}
get.setTimeRange(minStamp, maxStamp);
TimeRange timeRange = protoToTimeRange(proto.getTimeRange());
get.setTimeRange(timeRange.getMin(), timeRange.getMax());
}
if (proto.hasFilter()) {
FilterProtos.Filter filter = proto.getFilter();
@@ -829,16 +828,8 @@ public final class ProtobufUtil {
}
}
if (proto.hasTimeRange()) {
HBaseProtos.TimeRange timeRange = proto.getTimeRange();
long minStamp = 0;
long maxStamp = Long.MAX_VALUE;
if (timeRange.hasFrom()) {
minStamp = timeRange.getFrom();
}
if (timeRange.hasTo()) {
maxStamp = timeRange.getTo();
}
increment.setTimeRange(minStamp, maxStamp);
TimeRange timeRange = protoToTimeRange(proto.getTimeRange());
increment.setTimeRange(timeRange.getMin(), timeRange.getMax());
}
increment.setDurability(toDurability(proto.getDurability()));
for (NameBytesPair attribute : proto.getAttributeList()) {
@@ -876,6 +867,12 @@ public final class ProtobufUtil {
scanBuilder.setLoadColumnFamiliesOnDemand(loadColumnFamiliesOnDemand.booleanValue());
}
scanBuilder.setMaxVersions(scan.getMaxVersions());
for (Entry<byte[], TimeRange> cftr : scan.getColumnFamilyTimeRange().entrySet()) {
HBaseProtos.ColumnFamilyTimeRange.Builder b = HBaseProtos.ColumnFamilyTimeRange.newBuilder();
b.setColumnFamily(ByteString.copyFrom(cftr.getKey()));
b.setTimeRange(timeRangeToProto(cftr.getValue()));
scanBuilder.addCfTimeRange(b);
}
TimeRange timeRange = scan.getTimeRange();
if (!timeRange.isAllTime()) {
HBaseProtos.TimeRange.Builder timeRangeBuilder =
@@ -970,17 +967,16 @@ public final class ProtobufUtil {
if (proto.hasLoadColumnFamiliesOnDemand()) {
scan.setLoadColumnFamiliesOnDemand(proto.getLoadColumnFamiliesOnDemand());
}
if (proto.getCfTimeRangeCount() > 0) {
for (HBaseProtos.ColumnFamilyTimeRange cftr : proto.getCfTimeRangeList()) {
TimeRange timeRange = protoToTimeRange(cftr.getTimeRange());
scan.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(),
timeRange.getMin(), timeRange.getMax());
}
}
if (proto.hasTimeRange()) {
HBaseProtos.TimeRange timeRange = proto.getTimeRange();
long minStamp = 0;
long maxStamp = Long.MAX_VALUE;
if (timeRange.hasFrom()) {
minStamp = timeRange.getFrom();
}
if (timeRange.hasTo()) {
maxStamp = timeRange.getTo();
}
scan.setTimeRange(minStamp, maxStamp);
TimeRange timeRange = protoToTimeRange(proto.getTimeRange());
scan.setTimeRange(timeRange.getMin(), timeRange.getMax());
}
if (proto.hasFilter()) {
FilterProtos.Filter filter = proto.getFilter();
@@ -1042,6 +1038,12 @@ public final class ProtobufUtil {
if (get.getFilter() != null) {
builder.setFilter(ProtobufUtil.toFilter(get.getFilter()));
}
for (Entry<byte[], TimeRange> cftr : get.getColumnFamilyTimeRange().entrySet()) {
HBaseProtos.ColumnFamilyTimeRange.Builder b = HBaseProtos.ColumnFamilyTimeRange.newBuilder();
b.setColumnFamily(ByteString.copyFrom(cftr.getKey()));
b.setTimeRange(timeRangeToProto(cftr.getValue()));
builder.addCfTimeRange(b);
}
TimeRange timeRange = get.getTimeRange();
if (!timeRange.isAllTime()) {
HBaseProtos.TimeRange.Builder timeRangeBuilder =
@@ -3162,4 +3164,25 @@ public final class ProtobufUtil {
}
return scList;
}
private static HBaseProtos.TimeRange.Builder timeRangeToProto(TimeRange timeRange) {
HBaseProtos.TimeRange.Builder timeRangeBuilder =
HBaseProtos.TimeRange.newBuilder();
timeRangeBuilder.setFrom(timeRange.getMin());
timeRangeBuilder.setTo(timeRange.getMax());
return timeRangeBuilder;
}
private static TimeRange protoToTimeRange(HBaseProtos.TimeRange timeRange) throws IOException {
long minStamp = 0;
long maxStamp = Long.MAX_VALUE;
if (timeRange.hasFrom()) {
minStamp = timeRange.getFrom();
}
if (timeRange.hasTo()) {
maxStamp = timeRange.getTo();
}
return new TimeRange(minStamp, maxStamp);
}
}
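Both helpers encode the proto's optional-field defaults: an absent from maps to 0 and an absent to maps to Long.MAX_VALUE, so an unset TimeRange round-trips as "all time". A hedged round-trip sketch using the public toScan overloads modified above (family name and stamps are illustrative):

Scan scan = new Scan();
scan.setColumnFamilyTimeRange(Bytes.toBytes("cf"), 10L, 20L);
ClientProtos.Scan proto = ProtobufUtil.toScan(scan);  // writes a cf_time_range entry
Scan copy = ProtobufUtil.toScan(proto);               // read back via protoToTimeRange
assert copy.getColumnFamilyTimeRange().containsKey(Bytes.toBytes("cf"));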

View File

@@ -68,16 +68,15 @@ public class TimeRange {
* Represents interval [minStamp, maxStamp)
* @param minStamp the minimum timestamp, inclusive
* @param maxStamp the maximum timestamp, exclusive
* @throws IOException
* @throws IllegalArgumentException
*/
public TimeRange(long minStamp, long maxStamp)
throws IOException {
public TimeRange(long minStamp, long maxStamp) {
if (minStamp < 0 || maxStamp < 0) {
throw new IllegalArgumentException("Timestamp cannot be negative. minStamp:" + minStamp
+ ", maxStamp:" + maxStamp);
}
if(maxStamp < minStamp) {
throw new IOException("maxStamp is smaller than minStamp");
throw new IllegalArgumentException("maxStamp is smaller than minStamp");
}
this.minStamp = minStamp;
this.maxStamp = maxStamp;

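Switching the constructor from a checked IOException to an unchecked IllegalArgumentException is what lets Query.setColumnFamilyTimeRange above omit a throws clause. A small sketch of the new contract:

TimeRange valid = new TimeRange(10L, 20L);  // represents [10, 20)
try {
  new TimeRange(20L, 10L);                  // maxStamp < minStamp
} catch (IllegalArgumentException expected) {
  // "maxStamp is smaller than minStamp"
}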
View File

@@ -7974,6 +7974,668 @@ public final class HBaseProtos {
// @@protoc_insertion_point(class_scope:hbase.pb.TimeRange)
}
public interface ColumnFamilyTimeRangeOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required bytes column_family = 1;
/**
* <code>required bytes column_family = 1;</code>
*/
boolean hasColumnFamily();
/**
* <code>required bytes column_family = 1;</code>
*/
com.google.protobuf.ByteString getColumnFamily();
// required .hbase.pb.TimeRange time_range = 2;
/**
* <code>required .hbase.pb.TimeRange time_range = 2;</code>
*/
boolean hasTimeRange();
/**
* <code>required .hbase.pb.TimeRange time_range = 2;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange getTimeRange();
/**
* <code>required .hbase.pb.TimeRange time_range = 2;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder getTimeRangeOrBuilder();
}
/**
* Protobuf type {@code hbase.pb.ColumnFamilyTimeRange}
*
* <pre>
* ColumnFamily Specific TimeRange
* </pre>
*/
public static final class ColumnFamilyTimeRange extends
com.google.protobuf.GeneratedMessage
implements ColumnFamilyTimeRangeOrBuilder {
// Use ColumnFamilyTimeRange.newBuilder() to construct.
private ColumnFamilyTimeRange(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ColumnFamilyTimeRange(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ColumnFamilyTimeRange defaultInstance;
public static ColumnFamilyTimeRange getDefaultInstance() {
return defaultInstance;
}
public ColumnFamilyTimeRange getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ColumnFamilyTimeRange(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
columnFamily_ = input.readBytes();
break;
}
case 18: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = timeRange_.toBuilder();
}
timeRange_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(timeRange_);
timeRange_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilyTimeRange_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.Builder.class);
}
public static com.google.protobuf.Parser<ColumnFamilyTimeRange> PARSER =
new com.google.protobuf.AbstractParser<ColumnFamilyTimeRange>() {
public ColumnFamilyTimeRange parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ColumnFamilyTimeRange(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ColumnFamilyTimeRange> getParserForType() {
return PARSER;
}
private int bitField0_;
// required bytes column_family = 1;
public static final int COLUMN_FAMILY_FIELD_NUMBER = 1;
private com.google.protobuf.ByteString columnFamily_;
/**
* <code>required bytes column_family = 1;</code>
*/
public boolean hasColumnFamily() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required bytes column_family = 1;</code>
*/
public com.google.protobuf.ByteString getColumnFamily() {
return columnFamily_;
}
// required .hbase.pb.TimeRange time_range = 2;
public static final int TIME_RANGE_FIELD_NUMBER = 2;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange timeRange_;
/**
* <code>required .hbase.pb.TimeRange time_range = 2;</code>
*/
public boolean hasTimeRange() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>required .hbase.pb.TimeRange time_range = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange getTimeRange() {
return timeRange_;
}
/**
* <code>required .hbase.pb.TimeRange time_range = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder getTimeRangeOrBuilder() {
return timeRange_;
}
private void initFields() {
columnFamily_ = com.google.protobuf.ByteString.EMPTY;
timeRange_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasColumnFamily()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasTimeRange()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, columnFamily_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, timeRange_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, columnFamily_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, timeRange_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange) obj;
boolean result = true;
result = result && (hasColumnFamily() == other.hasColumnFamily());
if (hasColumnFamily()) {
result = result && getColumnFamily()
.equals(other.getColumnFamily());
}
result = result && (hasTimeRange() == other.hasTimeRange());
if (hasTimeRange()) {
result = result && getTimeRange()
.equals(other.getTimeRange());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasColumnFamily()) {
hash = (37 * hash) + COLUMN_FAMILY_FIELD_NUMBER;
hash = (53 * hash) + getColumnFamily().hashCode();
}
if (hasTimeRange()) {
hash = (37 * hash) + TIME_RANGE_FIELD_NUMBER;
hash = (53 * hash) + getTimeRange().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hbase.pb.ColumnFamilyTimeRange}
*
* <pre>
* ColumnFamily Specific TimeRange
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRangeOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilyTimeRange_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getTimeRangeFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
columnFamily_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000001);
if (timeRangeBuilder_ == null) {
timeRange_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance();
} else {
timeRangeBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange build() {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.columnFamily_ = columnFamily_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
if (timeRangeBuilder_ == null) {
result.timeRange_ = timeRange_;
} else {
result.timeRange_ = timeRangeBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.getDefaultInstance()) return this;
if (other.hasColumnFamily()) {
setColumnFamily(other.getColumnFamily());
}
if (other.hasTimeRange()) {
mergeTimeRange(other.getTimeRange());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasColumnFamily()) {
return false;
}
if (!hasTimeRange()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required bytes column_family = 1;
private com.google.protobuf.ByteString columnFamily_ = com.google.protobuf.ByteString.EMPTY;
/**
* <code>required bytes column_family = 1;</code>
*/
public boolean hasColumnFamily() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required bytes column_family = 1;</code>
*/
public com.google.protobuf.ByteString getColumnFamily() {
return columnFamily_;
}
/**
* <code>required bytes column_family = 1;</code>
*/
public Builder setColumnFamily(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
columnFamily_ = value;
onChanged();
return this;
}
/**
* <code>required bytes column_family = 1;</code>
*/
public Builder clearColumnFamily() {
bitField0_ = (bitField0_ & ~0x00000001);
columnFamily_ = getDefaultInstance().getColumnFamily();
onChanged();
return this;
}
// required .hbase.pb.TimeRange time_range = 2;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange timeRange_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder> timeRangeBuilder_;
/**
* <code>required .hbase.pb.TimeRange time_range = 2;</code>
*/
public boolean hasTimeRange() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>required .hbase.pb.TimeRange time_range = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange getTimeRange() {
if (timeRangeBuilder_ == null) {
return timeRange_;
} else {
return timeRangeBuilder_.getMessage();
}
}
/**
* <code>required .hbase.pb.TimeRange time_range = 2;</code>
*/
public Builder setTimeRange(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange value) {
if (timeRangeBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
timeRange_ = value;
onChanged();
} else {
timeRangeBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* <code>required .hbase.pb.TimeRange time_range = 2;</code>
*/
public Builder setTimeRange(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder builderForValue) {
if (timeRangeBuilder_ == null) {
timeRange_ = builderForValue.build();
onChanged();
} else {
timeRangeBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* <code>required .hbase.pb.TimeRange time_range = 2;</code>
*/
public Builder mergeTimeRange(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange value) {
if (timeRangeBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
timeRange_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance()) {
timeRange_ =
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.newBuilder(timeRange_).mergeFrom(value).buildPartial();
} else {
timeRange_ = value;
}
onChanged();
} else {
timeRangeBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* <code>required .hbase.pb.TimeRange time_range = 2;</code>
*/
public Builder clearTimeRange() {
if (timeRangeBuilder_ == null) {
timeRange_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance();
onChanged();
} else {
timeRangeBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* <code>required .hbase.pb.TimeRange time_range = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder getTimeRangeBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getTimeRangeFieldBuilder().getBuilder();
}
/**
* <code>required .hbase.pb.TimeRange time_range = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder getTimeRangeOrBuilder() {
if (timeRangeBuilder_ != null) {
return timeRangeBuilder_.getMessageOrBuilder();
} else {
return timeRange_;
}
}
/**
* <code>required .hbase.pb.TimeRange time_range = 2;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder>
getTimeRangeFieldBuilder() {
if (timeRangeBuilder_ == null) {
timeRangeBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder>(
timeRange_,
getParentForChildren(),
isClean());
timeRange_ = null;
}
return timeRangeBuilder_;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.ColumnFamilyTimeRange)
}
static {
defaultInstance = new ColumnFamilyTimeRange(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.ColumnFamilyTimeRange)
}
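The generated builder above is what ProtobufUtil uses to populate cf_time_range. A hand-construction sketch with illustrative values (build() fails if either required field is unset):

HBaseProtos.ColumnFamilyTimeRange cftr = HBaseProtos.ColumnFamilyTimeRange.newBuilder()
    .setColumnFamily(com.google.protobuf.ByteString.copyFromUtf8("cf"))
    .setTimeRange(HBaseProtos.TimeRange.newBuilder().setFrom(10L).setTo(20L))
    .build();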
public interface ServerNameOrBuilder
extends com.google.protobuf.MessageOrBuilder {
@@ -19300,6 +19962,11 @@ public final class HBaseProtos {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_TimeRange_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_ColumnFamilyTimeRange_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_ServerName_descriptor;
private static
@@ -19414,42 +20081,44 @@ public final class HBaseProtos {
"ionSpecifier.RegionSpecifierType\022\r\n\005valu" +
"e\030\002 \002(\014\"?\n\023RegionSpecifierType\022\017\n\013REGION" +
"_NAME\020\001\022\027\n\023ENCODED_REGION_NAME\020\002\"%\n\tTime" +
"Range\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"A\n\nServe" +
"rName\022\021\n\thost_name\030\001 \002(\t\022\014\n\004port\030\002 \001(\r\022\022" +
"\n\nstart_code\030\003 \001(\004\"\033\n\013Coprocessor\022\014\n\004nam" +
"e\030\001 \002(\t\"-\n\016NameStringPair\022\014\n\004name\030\001 \002(\t\022" +
"\r\n\005value\030\002 \002(\t\",\n\rNameBytesPair\022\014\n\004name\030",
"\001 \002(\t\022\r\n\005value\030\002 \001(\014\"/\n\016BytesBytesPair\022\r" +
"\n\005first\030\001 \002(\014\022\016\n\006second\030\002 \002(\014\",\n\rNameInt" +
"64Pair\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030\002 \001(\003\"\325\001\n\023" +
"SnapshotDescription\022\014\n\004name\030\001 \002(\t\022\r\n\005tab" +
"le\030\002 \001(\t\022\030\n\rcreation_time\030\003 \001(\003:\0010\0227\n\004ty" +
"pe\030\004 \001(\0162\".hbase.pb.SnapshotDescription." +
"Type:\005FLUSH\022\017\n\007version\030\005 \001(\005\022\r\n\005owner\030\006 " +
"\001(\t\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH\020\001\022\r\n\t" +
"SKIPFLUSH\020\002\"\206\001\n\024ProcedureDescription\022\021\n\t" +
"signature\030\001 \002(\t\022\020\n\010instance\030\002 \001(\t\022\030\n\rcre",
"ation_time\030\003 \001(\003:\0010\022/\n\rconfiguration\030\004 \003" +
"(\0132\030.hbase.pb.NameStringPair\"\n\n\010EmptyMsg" +
"\"\033\n\007LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDoubleM" +
"sg\022\022\n\ndouble_msg\030\001 \002(\001\"\'\n\rBigDecimalMsg\022" +
"\026\n\016bigdecimal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016least" +
"_sig_bits\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"T" +
"\n\023NamespaceDescriptor\022\014\n\004name\030\001 \002(\014\022/\n\rc" +
"onfiguration\030\002 \003(\0132\030.hbase.pb.NameString" +
"Pair\"o\n\013VersionInfo\022\017\n\007version\030\001 \002(\t\022\013\n\003" +
"url\030\002 \002(\t\022\020\n\010revision\030\003 \002(\t\022\014\n\004user\030\004 \002(",
"\t\022\014\n\004date\030\005 \002(\t\022\024\n\014src_checksum\030\006 \002(\t\"Q\n" +
"\020RegionServerInfo\022\020\n\010infoPort\030\001 \001(\005\022+\n\014v" +
"ersion_info\030\002 \001(\0132\025.hbase.pb.VersionInfo" +
"*r\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQU" +
"AL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATE" +
"R_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*n\n\010" +
"TimeUnit\022\017\n\013NANOSECONDS\020\001\022\020\n\014MICROSECOND" +
"S\020\002\022\020\n\014MILLISECONDS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007MI" +
"NUTES\020\005\022\t\n\005HOURS\020\006\022\010\n\004DAYS\020\007B>\n*org.apac" +
"he.hadoop.hbase.protobuf.generatedB\013HBas",
"eProtosH\001\240\001\001"
"Range\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"W\n\025Colum" +
"nFamilyTimeRange\022\025\n\rcolumn_family\030\001 \002(\014\022" +
"\'\n\ntime_range\030\002 \002(\0132\023.hbase.pb.TimeRange" +
"\"A\n\nServerName\022\021\n\thost_name\030\001 \002(\t\022\014\n\004por" +
"t\030\002 \001(\r\022\022\n\nstart_code\030\003 \001(\004\"\033\n\013Coprocess",
"or\022\014\n\004name\030\001 \002(\t\"-\n\016NameStringPair\022\014\n\004na" +
"me\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNameBytesPair" +
"\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014\"/\n\016BytesBy" +
"tesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006second\030\002 \002(\014\"," +
"\n\rNameInt64Pair\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030\002" +
" \001(\003\"\325\001\n\023SnapshotDescription\022\014\n\004name\030\001 \002" +
"(\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation_time\030\003 \001(\003" +
":\0010\0227\n\004type\030\004 \001(\0162\".hbase.pb.SnapshotDes" +
"cription.Type:\005FLUSH\022\017\n\007version\030\005 \001(\005\022\r\n" +
"\005owner\030\006 \001(\t\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FL",
"USH\020\001\022\r\n\tSKIPFLUSH\020\002\"\206\001\n\024ProcedureDescri" +
"ption\022\021\n\tsignature\030\001 \002(\t\022\020\n\010instance\030\002 \001" +
"(\t\022\030\n\rcreation_time\030\003 \001(\003:\0010\022/\n\rconfigur" +
"ation\030\004 \003(\0132\030.hbase.pb.NameStringPair\"\n\n" +
"\010EmptyMsg\"\033\n\007LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\037" +
"\n\tDoubleMsg\022\022\n\ndouble_msg\030\001 \002(\001\"\'\n\rBigDe" +
"cimalMsg\022\026\n\016bigdecimal_msg\030\001 \002(\014\"5\n\004UUID" +
"\022\026\n\016least_sig_bits\030\001 \002(\004\022\025\n\rmost_sig_bit" +
"s\030\002 \002(\004\"T\n\023NamespaceDescriptor\022\014\n\004name\030\001" +
" \002(\014\022/\n\rconfiguration\030\002 \003(\0132\030.hbase.pb.N",
"ameStringPair\"o\n\013VersionInfo\022\017\n\007version\030" +
"\001 \002(\t\022\013\n\003url\030\002 \002(\t\022\020\n\010revision\030\003 \002(\t\022\014\n\004" +
"user\030\004 \002(\t\022\014\n\004date\030\005 \002(\t\022\024\n\014src_checksum" +
"\030\006 \002(\t\"Q\n\020RegionServerInfo\022\020\n\010infoPort\030\001" +
" \001(\005\022+\n\014version_info\030\002 \001(\0132\025.hbase.pb.Ve" +
"rsionInfo*r\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLE" +
"SS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022" +
"\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO" +
"_OP\020\006*n\n\010TimeUnit\022\017\n\013NANOSECONDS\020\001\022\020\n\014MI" +
"CROSECONDS\020\002\022\020\n\014MILLISECONDS\020\003\022\013\n\007SECOND",
"S\020\004\022\013\n\007MINUTES\020\005\022\t\n\005HOURS\020\006\022\010\n\004DAYS\020\007B>\n" +
"*org.apache.hadoop.hbase.protobuf.genera" +
"tedB\013HBaseProtosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -19510,98 +20179,104 @@ public final class HBaseProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_TimeRange_descriptor,
new java.lang.String[] { "From", "To", });
internal_static_hbase_pb_ServerName_descriptor =
internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor =
getDescriptor().getMessageTypes().get(9);
internal_static_hbase_pb_ColumnFamilyTimeRange_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ColumnFamilyTimeRange_descriptor,
new java.lang.String[] { "ColumnFamily", "TimeRange", });
internal_static_hbase_pb_ServerName_descriptor =
getDescriptor().getMessageTypes().get(10);
internal_static_hbase_pb_ServerName_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ServerName_descriptor,
new java.lang.String[] { "HostName", "Port", "StartCode", });
internal_static_hbase_pb_Coprocessor_descriptor =
getDescriptor().getMessageTypes().get(10);
getDescriptor().getMessageTypes().get(11);
internal_static_hbase_pb_Coprocessor_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_Coprocessor_descriptor,
new java.lang.String[] { "Name", });
internal_static_hbase_pb_NameStringPair_descriptor =
getDescriptor().getMessageTypes().get(11);
getDescriptor().getMessageTypes().get(12);
internal_static_hbase_pb_NameStringPair_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_NameStringPair_descriptor,
new java.lang.String[] { "Name", "Value", });
internal_static_hbase_pb_NameBytesPair_descriptor =
getDescriptor().getMessageTypes().get(12);
getDescriptor().getMessageTypes().get(13);
internal_static_hbase_pb_NameBytesPair_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_NameBytesPair_descriptor,
new java.lang.String[] { "Name", "Value", });
internal_static_hbase_pb_BytesBytesPair_descriptor =
getDescriptor().getMessageTypes().get(13);
getDescriptor().getMessageTypes().get(14);
internal_static_hbase_pb_BytesBytesPair_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_BytesBytesPair_descriptor,
new java.lang.String[] { "First", "Second", });
internal_static_hbase_pb_NameInt64Pair_descriptor =
getDescriptor().getMessageTypes().get(14);
getDescriptor().getMessageTypes().get(15);
internal_static_hbase_pb_NameInt64Pair_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_NameInt64Pair_descriptor,
new java.lang.String[] { "Name", "Value", });
internal_static_hbase_pb_SnapshotDescription_descriptor =
getDescriptor().getMessageTypes().get(15);
getDescriptor().getMessageTypes().get(16);
internal_static_hbase_pb_SnapshotDescription_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_SnapshotDescription_descriptor,
new java.lang.String[] { "Name", "Table", "CreationTime", "Type", "Version", "Owner", });
internal_static_hbase_pb_ProcedureDescription_descriptor =
getDescriptor().getMessageTypes().get(16);
getDescriptor().getMessageTypes().get(17);
internal_static_hbase_pb_ProcedureDescription_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ProcedureDescription_descriptor,
new java.lang.String[] { "Signature", "Instance", "CreationTime", "Configuration", });
internal_static_hbase_pb_EmptyMsg_descriptor =
getDescriptor().getMessageTypes().get(17);
getDescriptor().getMessageTypes().get(18);
internal_static_hbase_pb_EmptyMsg_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_EmptyMsg_descriptor,
new java.lang.String[] { });
internal_static_hbase_pb_LongMsg_descriptor =
getDescriptor().getMessageTypes().get(18);
getDescriptor().getMessageTypes().get(19);
internal_static_hbase_pb_LongMsg_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_LongMsg_descriptor,
new java.lang.String[] { "LongMsg", });
internal_static_hbase_pb_DoubleMsg_descriptor =
getDescriptor().getMessageTypes().get(19);
getDescriptor().getMessageTypes().get(20);
internal_static_hbase_pb_DoubleMsg_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_DoubleMsg_descriptor,
new java.lang.String[] { "DoubleMsg", });
internal_static_hbase_pb_BigDecimalMsg_descriptor =
getDescriptor().getMessageTypes().get(20);
getDescriptor().getMessageTypes().get(21);
internal_static_hbase_pb_BigDecimalMsg_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_BigDecimalMsg_descriptor,
new java.lang.String[] { "BigdecimalMsg", });
internal_static_hbase_pb_UUID_descriptor =
getDescriptor().getMessageTypes().get(21);
getDescriptor().getMessageTypes().get(22);
internal_static_hbase_pb_UUID_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_UUID_descriptor,
new java.lang.String[] { "LeastSigBits", "MostSigBits", });
internal_static_hbase_pb_NamespaceDescriptor_descriptor =
getDescriptor().getMessageTypes().get(22);
getDescriptor().getMessageTypes().get(23);
internal_static_hbase_pb_NamespaceDescriptor_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_NamespaceDescriptor_descriptor,
new java.lang.String[] { "Name", "Configuration", });
internal_static_hbase_pb_VersionInfo_descriptor =
getDescriptor().getMessageTypes().get(23);
getDescriptor().getMessageTypes().get(24);
internal_static_hbase_pb_VersionInfo_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_VersionInfo_descriptor,
new java.lang.String[] { "Version", "Url", "Revision", "User", "Date", "SrcChecksum", });
internal_static_hbase_pb_RegionServerInfo_descriptor =
getDescriptor().getMessageTypes().get(24);
getDescriptor().getMessageTypes().get(25);
internal_static_hbase_pb_RegionServerInfo_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_RegionServerInfo_descriptor,

View File

@@ -82,6 +82,7 @@ message Get {
optional bool existence_only = 10 [default = false];
optional Consistency consistency = 12 [default = STRONG];
repeated ColumnFamilyTimeRange cf_time_range = 13;
}
message Result {
@@ -252,6 +253,7 @@ message Scan {
optional Consistency consistency = 16 [default = STRONG];
optional uint32 caching = 17;
optional bool allow_partial_results = 18;
repeated ColumnFamilyTimeRange cf_time_range = 19;
}
/**

View File

@@ -123,6 +123,12 @@ message TimeRange {
optional uint64 to = 2;
}
/* ColumnFamily Specific TimeRange */
message ColumnFamilyTimeRange {
required bytes column_family = 1;
required TimeRange time_range = 2;
}
/* Comparison operators */
enum CompareType {
LESS = 0;

View File

@@ -827,8 +827,7 @@ public class DefaultMemStore implements MemStore {
}
@Override
public boolean shouldUseScanner(Scan scan, SortedSet<byte[]> columns,
long oldestUnexpiredTS) {
public boolean shouldUseScanner(Scan scan, Store store, long oldestUnexpiredTS) {
return shouldSeek(scan, oldestUnexpiredTS);
}

View File

@@ -19,7 +19,6 @@
package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.util.SortedSet;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.Cell;
@@ -76,15 +75,12 @@ public interface KeyValueScanner extends Shipper {
* Allows to filter out scanners (both StoreFile and memstore) that we don't
* want to use based on criteria such as Bloom filters and timestamp ranges.
* @param scan the scan that we are selecting scanners for
* @param columns the set of columns in the current column family, or null if
* not specified by the scan
* @param store the store we are performing the scan on.
* @param oldestUnexpiredTS the oldest timestamp we are interested in for
* this query, based on TTL
* @return true if the scanner should be included in the query
*/
boolean shouldUseScanner(
Scan scan, SortedSet<byte[]> columns, long oldestUnexpiredTS
);
boolean shouldUseScanner(Scan scan, Store store, long oldestUnexpiredTS);
// "Lazy scanner" optimizations

View File

@@ -19,7 +19,6 @@
package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.util.SortedSet;
import org.apache.commons.lang.NotImplementedException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -56,8 +55,7 @@ public abstract class NonLazyKeyValueScanner implements KeyValueScanner {
}
@Override
public boolean shouldUseScanner(Scan scan, SortedSet<byte[]> columns,
long oldestUnexpiredTS) {
public boolean shouldUseScanner(Scan scan, Store store, long oldestUnexpiredTS) {
// No optimizations implemented by default.
return true;
}

View File

@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
@@ -1239,16 +1240,16 @@ public class StoreFile {
/**
* Check if this storeFile may contain keys within the TimeRange that
* have not expired (i.e. not older than oldestUnexpiredTS).
* @param scan the current scan
* @param timeRange the timeRange to restrict
* @param oldestUnexpiredTS the oldest timestamp that is not expired, as
* determined by the column family's TTL
* @return false if queried keys definitely don't exist in this StoreFile
*/
boolean passesTimerangeFilter(Scan scan, long oldestUnexpiredTS) {
boolean passesTimerangeFilter(TimeRange timeRange, long oldestUnexpiredTS) {
if (timeRangeTracker == null) {
return true;
} else {
return timeRangeTracker.includesTimeRange(scan.getTimeRange()) &&
return timeRangeTracker.includesTimeRange(timeRange) &&
timeRangeTracker.getMaximumTimestamp() >= oldestUnexpiredTS;
}
}

View File

@@ -24,7 +24,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.SortedSet;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
@@ -62,7 +62,7 @@ public class StoreFileScanner implements KeyValueScanner {
private static AtomicLong seekCount;
private ScanQueryMatcher matcher;
private long readPt;
/**
@@ -422,9 +422,15 @@ public class StoreFileScanner implements KeyValueScanner {
}
@Override
public boolean shouldUseScanner(Scan scan, SortedSet<byte[]> columns, long oldestUnexpiredTS) {
return reader.passesTimerangeFilter(scan, oldestUnexpiredTS)
&& reader.passesKeyRangeFilter(scan) && reader.passesBloomFilter(scan, columns);
public boolean shouldUseScanner(Scan scan, Store store, long oldestUnexpiredTS) {
// Use the column family's own time range if one was set; otherwise fall back to the scan's global time range.
byte[] cf = store.getFamily().getName();
TimeRange timeRange = scan.getColumnFamilyTimeRange().get(cf);
if (timeRange == null) {
timeRange = scan.getTimeRange();
}
return reader.passesTimerangeFilter(timeRange, oldestUnexpiredTS) && reader
.passesKeyRangeFilter(scan) && reader.passesBloomFilter(scan, scan.getFamilyMap().get(cf));
}
@Override

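The lookup above is the server-side half of the feature: a family-specific range, when present, replaces the scan's global range before the reader's timestamp tracker and Bloom filter are consulted. The same rule, extracted as a standalone helper purely for illustration (not part of the patch):

static TimeRange effectiveTimeRange(Scan scan, Store store) {
  byte[] cf = store.getFamily().getName();
  TimeRange tr = scan.getColumnFamilyTimeRange().get(cf);
  return tr != null ? tr : scan.getTimeRange(); // per-CF range wins over the global one
}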
View File

@@ -394,7 +394,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
continue;
}
if (kvs.shouldUseScanner(scan, columns, expiredTimestampCutoff)) {
if (kvs.shouldUseScanner(scan, store, expiredTimestampCutoff)) {
scanners.add(kvs);
}
}

View File

@@ -260,7 +260,7 @@ public class TestHFileWriterV2 {
// Static stuff used by various HFile v2 unit tests
private static final String COLUMN_FAMILY_NAME = "_-myColumnFamily-_";
public static final String COLUMN_FAMILY_NAME = "_-myColumnFamily-_";
private static final int MIN_ROW_OR_QUALIFIER_LENGTH = 64;
private static final int MAX_ROW_OR_QUALIFIER_LENGTH = 128;

View File

@@ -23,13 +23,14 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -39,6 +40,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -278,16 +280,18 @@ public class TestCompoundBloomFilter {
private boolean isInBloom(StoreFileScanner scanner, byte[] row, BloomType bt,
Random rand) {
return isInBloom(scanner, row,
TestHFileWriterV2.randomRowOrQualifier(rand));
return isInBloom(scanner, row, TestHFileWriterV2.randomRowOrQualifier(rand));
}
private boolean isInBloom(StoreFileScanner scanner, byte[] row,
byte[] qualifier) {
Scan scan = new Scan(row, row);
TreeSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
columns.add(qualifier);
return scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE);
scan.addColumn(Bytes.toBytes(TestHFileWriterV2.COLUMN_FAMILY_NAME), qualifier);
Store store = mock(Store.class);
HColumnDescriptor hcd = mock(HColumnDescriptor.class);
when(hcd.getName()).thenReturn(Bytes.toBytes(TestHFileWriterV2.COLUMN_FAMILY_NAME));
when(store.getFamily()).thenReturn(hcd);
return scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
}
private Path writeStoreFile(int t, BloomType bt, List<KeyValue> kvs)

View File

@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
@@ -69,6 +70,10 @@ import com.google.common.base.Joiner;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
/**
* Test HStoreFile
*/
@@ -198,6 +203,21 @@ public class TestStoreFile extends HBaseTestCase {
finalRow.length));
}
@Test
public void testEmptyStoreFileRestrictKeyRanges() throws Exception {
StoreFile.Reader reader = mock(StoreFile.Reader.class);
Store store = mock(Store.class);
HColumnDescriptor hcd = mock(HColumnDescriptor.class);
byte[] cf = Bytes.toBytes("ty");
when(hcd.getName()).thenReturn(cf);
when(store.getFamily()).thenReturn(hcd);
StoreFileScanner scanner =
new StoreFileScanner(reader, mock(HFileScanner.class), false, false, 0);
Scan scan = new Scan();
scan.setColumnFamilyTimeRange(cf, 0, 1);
assertFalse(scanner.shouldUseScanner(scan, store, 0));
}
@Test
public void testHFileLink() throws IOException {
final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testHFileLinkTb"));
@@ -493,7 +513,11 @@ public class TestStoreFile extends HBaseTestCase {
Scan scan = new Scan(row.getBytes(),row.getBytes());
scan.addColumn("family".getBytes(), "family:col".getBytes());
boolean exists = scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE);
Store store = mock(Store.class);
HColumnDescriptor hcd = mock(HColumnDescriptor.class);
when(hcd.getName()).thenReturn(Bytes.toBytes("family"));
when(store.getFamily()).thenReturn(hcd);
boolean exists = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
if (i % 2 == 0) {
if (!exists) falseNeg++;
} else {
@@ -504,9 +528,8 @@ public class TestStoreFile extends HBaseTestCase {
fs.delete(f, true);
assertEquals("False negatives: " + falseNeg, 0, falseNeg);
int maxFalsePos = (int) (2 * 2000 * err);
assertTrue("Too many false positives: " + falsePos + " (err=" + err
+ ", expected no more than " + maxFalsePos + ")",
falsePos <= maxFalsePos);
assertTrue("Too many false positives: " + falsePos + " (err=" + err + ", expected no more than "
+ maxFalsePos + ")", falsePos <= maxFalsePos);
}
private static final int BLOCKSIZE_SMALL = 8192;
@@ -675,6 +698,10 @@ public class TestStoreFile extends HBaseTestCase {
StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
assertEquals(expKeys[x], reader.generalBloomFilter.getKeyCount());
Store store = mock(Store.class);
HColumnDescriptor hcd = mock(HColumnDescriptor.class);
when(hcd.getName()).thenReturn(Bytes.toBytes("family"));
when(store.getFamily()).thenReturn(hcd);
// check false positives rate
int falsePos = 0;
int falseNeg = 0;
@@ -687,8 +714,9 @@ public class TestStoreFile extends HBaseTestCase {
Scan scan = new Scan(row.getBytes(),row.getBytes());
scan.addColumn("family".getBytes(), ("col"+col).getBytes());
boolean exists =
scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE);
scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
boolean shouldRowExist = i % 2 == 0;
boolean shouldColExist = j % 2 == 0;
shouldColExist = shouldColExist || bt[x] == BloomType.ROW;
@@ -711,15 +739,12 @@ public class TestStoreFile extends HBaseTestCase {
@Test
public void testSeqIdComparator() {
assertOrdering(StoreFile.Comparators.SEQ_ID,
mockStoreFile(true, 100, 1000, -1, "/foo/123"),
mockStoreFile(true, 100, 1000, -1, "/foo/124"),
mockStoreFile(true, 99, 1000, -1, "/foo/126"),
mockStoreFile(true, 98, 2000, -1, "/foo/126"),
mockStoreFile(false, 3453, -1, 1, "/foo/1"),
mockStoreFile(false, 2, -1, 3, "/foo/2"),
mockStoreFile(false, 1000, -1, 5, "/foo/2"),
mockStoreFile(false, 76, -1, 5, "/foo/3"));
assertOrdering(StoreFile.Comparators.SEQ_ID, mockStoreFile(true, 100, 1000, -1, "/foo/123"),
mockStoreFile(true, 100, 1000, -1, "/foo/124"),
mockStoreFile(true, 99, 1000, -1, "/foo/126"),
mockStoreFile(true, 98, 2000, -1, "/foo/126"), mockStoreFile(false, 3453, -1, 1, "/foo/1"),
mockStoreFile(false, 2, -1, 3, "/foo/2"), mockStoreFile(false, 1000, -1, 5, "/foo/2"),
mockStoreFile(false, 76, -1, 5, "/foo/3"));
}
/**
@@ -797,7 +822,7 @@ public class TestStoreFile extends HBaseTestCase {
Scan scan = new Scan();
// Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
Path storedir = new Path(new Path(testDir, "7e0102"), "familyname");
Path storedir = new Path(new Path(testDir, "7e0102"), Bytes.toString(family));
Path dir = new Path(storedir, "1234567890");
HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
// Make a store file and write data to it.
@@ -807,7 +832,7 @@ public class TestStoreFile extends HBaseTestCase {
.build();
List<KeyValue> kvList = getKeyValueSet(timestamps,numRows,
family, qualifier);
qualifier, family);
for (KeyValue kv : kvList) {
writer.append(kv);
@@ -817,26 +842,39 @@ public class TestStoreFile extends HBaseTestCase {
StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
BloomType.NONE);
Store store = mock(Store.class);
HColumnDescriptor hcd = mock(HColumnDescriptor.class);
when(hcd.getName()).thenReturn(family);
when(store.getFamily()).thenReturn(hcd);
StoreFile.Reader reader = hsf.createReader();
StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
TreeSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
columns.add(qualifier);
scan.setTimeRange(20, 100);
assertTrue(scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE));
assertTrue(scanner.shouldUseScanner(scan, store, Long.MIN_VALUE));
scan.setTimeRange(1, 2);
assertTrue(scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE));
assertTrue(scanner.shouldUseScanner(scan, store, Long.MIN_VALUE));
scan.setTimeRange(8, 10);
assertTrue(scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE));
assertTrue(scanner.shouldUseScanner(scan, store, Long.MIN_VALUE));
scan.setTimeRange(7, 50);
assertTrue(scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE));
// let's make sure it still works with column family time ranges
scan.setColumnFamilyTimeRange(family, 7, 50);
assertTrue(scanner.shouldUseScanner(scan, store, Long.MIN_VALUE));
// This test relies on the timestamp range optimization
scan = new Scan();
scan.setTimeRange(27, 50);
assertTrue(!scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE));
assertTrue(!scanner.shouldUseScanner(scan, store, Long.MIN_VALUE));
// should still use the scanner because we override the family time range
scan = new Scan();
scan.setTimeRange(27, 50);
scan.setColumnFamilyTimeRange(family, 7, 50);
assertTrue(scanner.shouldUseScanner(scan, store, Long.MIN_VALUE));
}
@Test