HBASE-13954 Remove HTableInterface#getRowOrBefore related server side code. (Ashish)
commit 3b6db26863
parent cceee1b0c3
@@ -110,7 +110,6 @@ public class Get extends Query
    this.storeOffset = get.getRowOffsetPerColumnFamily();
    this.tr = get.getTimeRange();
    this.checkExistenceOnly = get.isCheckExistenceOnly();
    this.closestRowBefore = get.isClosestRowBefore();
    Map<byte[], NavigableSet<byte[]>> fams = get.getFamilyMap();
    for (Map.Entry<byte[],NavigableSet<byte[]>> entry : fams.entrySet()) {
      byte [] fam = entry.getKey();
@@ -137,12 +136,23 @@ public class Get extends Query
    return this;
  }

  /**
   * This will always return the default value which is false as client cannot set the value to this
   * property any more.
   * @deprecated since 2.0.0 and will be removed in 3.0.0
   */
  @Deprecated
  public boolean isClosestRowBefore() {
    return closestRowBefore;
  }

  /**
   * This is not used any more and does nothing. Use reverse scan instead.
   * @deprecated since 2.0.0 and will be removed in 3.0.0
   */
  @Deprecated
  public Get setClosestRowBefore(boolean closestRowBefore) {
    this.closestRowBefore = closestRowBefore;
    // do Nothing
    return this;
  }
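For callers that previously set Get#setClosestRowBefore(true), the deprecation notice above points at reverse scans as the replacement. A minimal sketch of that replacement, using the standard client API; the helper class and method names are hypothetical:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

// Hypothetical helper reproducing the old closest-row-before semantics on the client side.
public final class ClosestRowBeforeExample {
  public static Result closestRowBefore(Table table, byte[] row) throws IOException {
    Scan scan = new Scan(row);   // start the scan at the requested row
    scan.setReversed(true);      // walk backwards through the table
    scan.setSmall(true);         // single-RPC "small" scan
    scan.setCaching(1);          // only one row is needed
    try (ResultScanner scanner = table.getScanner(scan)) {
      return scanner.next();     // the row itself or the closest row before it; null if none
    }
  }
}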
@@ -937,24 +937,6 @@ public class Scan extends Query {
    return (Scan) super.setIsolationLevel(level);
  }

  /**
   * Utility that creates a Scan that will do a small scan in reverse from passed row
   * looking for next closest row.
   * @param row
   * @param family
   * @return An instance of Scan primed with passed <code>row</code> and <code>family</code> to
   *   scan in reverse for one row only.
   */
  static Scan createGetClosestRowOrBeforeReverseScan(byte[] row) {
    // Below does not work if you add in family; need to add the family qualifier that is highest
    // possible family qualifier. Do we have such a notion? Would have to be magic.
    Scan scan = new Scan(row);
    scan.setSmall(true);
    scan.setReversed(true);
    scan.setCaching(1);
    return scan;
  }

  /**
   * Enable collection of {@link ScanMetrics}. For advanced users.
   * @param enabled Set to true to enable accumulating scan metrics
@@ -97,7 +97,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServic
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue;
@@ -122,12 +121,12 @@ import org.apache.hadoop.hbase.protobuf.generated.RPCProtos;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor;
import org.apache.hadoop.hbase.quotas.QuotaScope;
import org.apache.hadoop.hbase.quotas.QuotaType;
@@ -489,9 +488,6 @@ public final class ProtobufUtil {
    if (proto.hasExistenceOnly() && proto.getExistenceOnly()){
      get.setCheckExistenceOnly(true);
    }
    if (proto.hasClosestRowBefore() && proto.getClosestRowBefore()){
      get.setClosestRowBefore(true);
    }
    if (proto.hasConsistency()) {
      get.setConsistency(toConsistency(proto.getConsistency()));
    }
@@ -1077,9 +1073,6 @@ public final class ProtobufUtil {
    if (get.isCheckExistenceOnly()){
      builder.setExistenceOnly(true);
    }
    if (get.isClosestRowBefore()){
      builder.setClosestRowBefore(true);
    }
    if (get.getConsistency() != null && get.getConsistency() != Consistency.STRONG) {
      builder.setConsistency(toConsistency(get.getConsistency()));
    }
@@ -1549,33 +1542,6 @@ public final class ProtobufUtil {

  // Start helpers for Client

  /**
   * A helper to get a row, or the closest one before it, using client protocol.
   *
   * @param client
   * @param regionName
   * @param row
   * @param family
   * @return the row or the closestRowBefore if it doesn't exist
   * @throws IOException
   * @deprecated since 0.99 - use reversed scanner instead.
   */
  @Deprecated
  public static Result getRowOrBefore(final ClientService.BlockingInterface client,
      final byte[] regionName, final byte[] row,
      final byte[] family) throws IOException {
    GetRequest request =
        RequestConverter.buildGetRowOrBeforeRequest(
            regionName, row, family);
    try {
      GetResponse response = client.get(null, request);
      if (!response.hasResult()) return null;
      return toResult(response.getResult());
    } catch (ServiceException se) {
      throw getRemoteException(se);
    }
  }

  /**
   * A helper to bulk load a list of HFiles using client protocol.
   *
@@ -64,7 +64,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionReques
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
@@ -123,35 +122,6 @@ public final class RequestConverter {

  // Start utilities for Client

  /**
   * Create a new protocol buffer GetRequest to get a row, all columns in a family.
   * If there is no such row, return the closest row before it.
   *
   * @param regionName the name of the region to get
   * @param row the row to get
   * @param family the column family to get
   *   should return the immediate row before
   * @return a protocol buffer GetRequest
   */
  public static GetRequest buildGetRowOrBeforeRequest(
      final byte[] regionName, final byte[] row, final byte[] family) {
    GetRequest.Builder builder = GetRequest.newBuilder();
    RegionSpecifier region = buildRegionSpecifier(
        RegionSpecifierType.REGION_NAME, regionName);
    builder.setRegion(region);

    Column.Builder columnBuilder = Column.newBuilder();
    columnBuilder.setFamily(ByteStringer.wrap(family));
    ClientProtos.Get.Builder getBuilder =
        ClientProtos.Get.newBuilder();
    getBuilder.setRow(ByteStringer.wrap(row));
    getBuilder.addColumn(columnBuilder.build());
    getBuilder.setClosestRowBefore(true);
    builder.setGet(getBuilder.build());
    return builder.build();
  }


  /**
   * Create a protocol buffer GetRequest for a client Get
   *
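With buildGetRowOrBeforeRequest removed, callers build an ordinary GetRequest for an exact row. A hedged sketch, assuming RequestConverter#buildGetRequest(byte[], Get) as used elsewhere in the client; the class name and the regionName, row and family inputs are placeholders:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;

// Hypothetical illustration: a plain GetRequest for one row and family,
// with no closest-row-before flag involved.
public final class PlainGetRequestExample {
  public static GetRequest buildPlainGetRequest(byte[] regionName, byte[] row, byte[] family)
      throws IOException {
    Get get = new Get(row);
    get.addFamily(family);
    return RequestConverter.buildGetRequest(regionName, get);
  }
}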
@@ -514,14 +514,6 @@ public class TestClientNoCluster extends Configured implements Tool {
      ClientProtos.Result.Builder resultBuilder = ClientProtos.Result.newBuilder();
      ByteString row = request.getGet().getRow();
      Pair<HRegionInfo, ServerName> p = meta.get(row.toByteArray());
      if (p == null) {
        if (request.getGet().getClosestRowBefore()) {
          byte [] bytes = row.toByteArray();
          SortedMap<byte [], Pair<HRegionInfo, ServerName>> head =
              bytes != null? meta.headMap(bytes): meta;
          p = head == null? null: head.get(head.lastKey());
        }
      }
      if (p != null) {
        resultBuilder.addCell(getRegionInfo(row, p.getFirst()));
        resultBuilder.addCell(getServer(row, p.getSecond()));
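The fake region server above resolves a missing meta row by taking the last entry of headMap(row). A small sketch of that lookup in isolation; the class and method names are hypothetical, and the map is assumed to be ordered by a byte-array comparator such as Bytes.BYTES_COMPARATOR, as in the test:

import java.util.SortedMap;

// Hypothetical illustration of the closest-row-before lookup the fake meta server emulates:
// headMap(row) contains every key strictly before `row`; its last key is the closest one.
final class ClosestBeforeLookup {
  static <V> V closestBefore(SortedMap<byte[], V> meta, byte[] row) {
    SortedMap<byte[], V> head = meta.headMap(row); // exclusive of `row` itself
    return head.isEmpty() ? null : head.get(head.lastKey());
  }
}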
@@ -168,7 +168,6 @@ public class TestGet {
    get.setReplicaId(2);
    get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
    get.setCheckExistenceOnly(true);
    get.setClosestRowBefore(true);
    get.setTimeRange(3, 4);
    get.setMaxVersions(11);
    get.setMaxResultsPerColumnFamily(10);
@@ -191,9 +190,7 @@ public class TestGet {

    // from Get class
    assertEquals(get.isCheckExistenceOnly(), copyGet.isCheckExistenceOnly());
    assertEquals(get.isClosestRowBefore(), copyGet.isClosestRowBefore());
    assertTrue(get.getTimeRange().equals(copyGet.getTimeRange()));
    assertEquals(get.isClosestRowBefore(), copyGet.isClosestRowBefore());
    assertEquals(get.getMaxVersions(), copyGet.getMaxVersions());
    assertEquals(get.getMaxResultsPerColumnFamily(), copyGet.getMaxResultsPerColumnFamily());
    assertEquals(get.getRowOffsetPerColumnFamily(), copyGet.getRowOffsetPerColumnFamily());
@ -1926,26 +1926,6 @@ public final class ClientProtos {
|
|||
*/
|
||||
boolean getExistenceOnly();
|
||||
|
||||
// optional bool closest_row_before = 11 [default = false];
|
||||
/**
|
||||
* <code>optional bool closest_row_before = 11 [default = false];</code>
|
||||
*
|
||||
* <pre>
|
||||
* If the row to get doesn't exist, return the
|
||||
* closest row before.
|
||||
* </pre>
|
||||
*/
|
||||
boolean hasClosestRowBefore();
|
||||
/**
|
||||
* <code>optional bool closest_row_before = 11 [default = false];</code>
|
||||
*
|
||||
* <pre>
|
||||
* If the row to get doesn't exist, return the
|
||||
* closest row before.
|
||||
* </pre>
|
||||
*/
|
||||
boolean getClosestRowBefore();
|
||||
|
||||
// optional .hbase.pb.Consistency consistency = 12 [default = STRONG];
|
||||
/**
|
||||
* <code>optional .hbase.pb.Consistency consistency = 12 [default = STRONG];</code>
|
||||
|
@ -1963,8 +1943,7 @@ public final class ClientProtos {
|
|||
**
|
||||
* The protocol buffer version of Get.
|
||||
* Unless existence_only is specified, return all the requested data
|
||||
* for the row that matches exactly, or the one that immediately
|
||||
* precedes it if closest_row_before is specified.
|
||||
* for the row that matches exactly.
|
||||
* </pre>
|
||||
*/
|
||||
public static final class Get extends
|
||||
|
@ -2087,18 +2066,13 @@ public final class ClientProtos {
|
|||
existenceOnly_ = input.readBool();
|
||||
break;
|
||||
}
|
||||
case 88: {
|
||||
bitField0_ |= 0x00000100;
|
||||
closestRowBefore_ = input.readBool();
|
||||
break;
|
||||
}
|
||||
case 96: {
|
||||
int rawValue = input.readEnum();
|
||||
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency value = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.valueOf(rawValue);
|
||||
if (value == null) {
|
||||
unknownFields.mergeVarintField(12, rawValue);
|
||||
} else {
|
||||
bitField0_ |= 0x00000200;
|
||||
bitField0_ |= 0x00000100;
|
||||
consistency_ = value;
|
||||
}
|
||||
break;
|
||||
|
@ -2371,32 +2345,6 @@ public final class ClientProtos {
|
|||
return existenceOnly_;
|
||||
}
|
||||
|
||||
// optional bool closest_row_before = 11 [default = false];
|
||||
public static final int CLOSEST_ROW_BEFORE_FIELD_NUMBER = 11;
|
||||
private boolean closestRowBefore_;
|
||||
/**
|
||||
* <code>optional bool closest_row_before = 11 [default = false];</code>
|
||||
*
|
||||
* <pre>
|
||||
* If the row to get doesn't exist, return the
|
||||
* closest row before.
|
||||
* </pre>
|
||||
*/
|
||||
public boolean hasClosestRowBefore() {
|
||||
return ((bitField0_ & 0x00000100) == 0x00000100);
|
||||
}
|
||||
/**
|
||||
* <code>optional bool closest_row_before = 11 [default = false];</code>
|
||||
*
|
||||
* <pre>
|
||||
* If the row to get doesn't exist, return the
|
||||
* closest row before.
|
||||
* </pre>
|
||||
*/
|
||||
public boolean getClosestRowBefore() {
|
||||
return closestRowBefore_;
|
||||
}
|
||||
|
||||
// optional .hbase.pb.Consistency consistency = 12 [default = STRONG];
|
||||
public static final int CONSISTENCY_FIELD_NUMBER = 12;
|
||||
private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency consistency_;
|
||||
|
@ -2404,7 +2352,7 @@ public final class ClientProtos {
|
|||
* <code>optional .hbase.pb.Consistency consistency = 12 [default = STRONG];</code>
|
||||
*/
|
||||
public boolean hasConsistency() {
|
||||
return ((bitField0_ & 0x00000200) == 0x00000200);
|
||||
return ((bitField0_ & 0x00000100) == 0x00000100);
|
||||
}
|
||||
/**
|
||||
* <code>optional .hbase.pb.Consistency consistency = 12 [default = STRONG];</code>
|
||||
|
@ -2424,7 +2372,6 @@ public final class ClientProtos {
|
|||
storeLimit_ = 0;
|
||||
storeOffset_ = 0;
|
||||
existenceOnly_ = false;
|
||||
closestRowBefore_ = false;
|
||||
consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
|
||||
}
|
||||
private byte memoizedIsInitialized = -1;
|
||||
|
@ -2492,9 +2439,6 @@ public final class ClientProtos {
|
|||
output.writeBool(10, existenceOnly_);
|
||||
}
|
||||
if (((bitField0_ & 0x00000100) == 0x00000100)) {
|
||||
output.writeBool(11, closestRowBefore_);
|
||||
}
|
||||
if (((bitField0_ & 0x00000200) == 0x00000200)) {
|
||||
output.writeEnum(12, consistency_.getNumber());
|
||||
}
|
||||
getUnknownFields().writeTo(output);
|
||||
|
@ -2547,10 +2491,6 @@ public final class ClientProtos {
|
|||
.computeBoolSize(10, existenceOnly_);
|
||||
}
|
||||
if (((bitField0_ & 0x00000100) == 0x00000100)) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeBoolSize(11, closestRowBefore_);
|
||||
}
|
||||
if (((bitField0_ & 0x00000200) == 0x00000200)) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeEnumSize(12, consistency_.getNumber());
|
||||
}
|
||||
|
@ -2621,11 +2561,6 @@ public final class ClientProtos {
|
|||
result = result && (getExistenceOnly()
|
||||
== other.getExistenceOnly());
|
||||
}
|
||||
result = result && (hasClosestRowBefore() == other.hasClosestRowBefore());
|
||||
if (hasClosestRowBefore()) {
|
||||
result = result && (getClosestRowBefore()
|
||||
== other.getClosestRowBefore());
|
||||
}
|
||||
result = result && (hasConsistency() == other.hasConsistency());
|
||||
if (hasConsistency()) {
|
||||
result = result &&
|
||||
|
@ -2684,10 +2619,6 @@ public final class ClientProtos {
|
|||
hash = (37 * hash) + EXISTENCE_ONLY_FIELD_NUMBER;
|
||||
hash = (53 * hash) + hashBoolean(getExistenceOnly());
|
||||
}
|
||||
if (hasClosestRowBefore()) {
|
||||
hash = (37 * hash) + CLOSEST_ROW_BEFORE_FIELD_NUMBER;
|
||||
hash = (53 * hash) + hashBoolean(getClosestRowBefore());
|
||||
}
|
||||
if (hasConsistency()) {
|
||||
hash = (37 * hash) + CONSISTENCY_FIELD_NUMBER;
|
||||
hash = (53 * hash) + hashEnum(getConsistency());
|
||||
|
@ -2770,8 +2701,7 @@ public final class ClientProtos {
|
|||
**
|
||||
* The protocol buffer version of Get.
|
||||
* Unless existence_only is specified, return all the requested data
|
||||
* for the row that matches exactly, or the one that immediately
|
||||
* precedes it if closest_row_before is specified.
|
||||
* for the row that matches exactly.
|
||||
* </pre>
|
||||
*/
|
||||
public static final class Builder extends
|
||||
|
@ -2849,10 +2779,8 @@ public final class ClientProtos {
|
|||
bitField0_ = (bitField0_ & ~0x00000100);
|
||||
existenceOnly_ = false;
|
||||
bitField0_ = (bitField0_ & ~0x00000200);
|
||||
closestRowBefore_ = false;
|
||||
bitField0_ = (bitField0_ & ~0x00000400);
|
||||
consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
|
||||
bitField0_ = (bitField0_ & ~0x00000800);
|
||||
bitField0_ = (bitField0_ & ~0x00000400);
|
||||
return this;
|
||||
}
|
||||
|
||||
|
@ -2942,10 +2870,6 @@ public final class ClientProtos {
|
|||
if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
|
||||
to_bitField0_ |= 0x00000100;
|
||||
}
|
||||
result.closestRowBefore_ = closestRowBefore_;
|
||||
if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
|
||||
to_bitField0_ |= 0x00000200;
|
||||
}
|
||||
result.consistency_ = consistency_;
|
||||
result.bitField0_ = to_bitField0_;
|
||||
onBuilt();
|
||||
|
@ -3039,9 +2963,6 @@ public final class ClientProtos {
|
|||
if (other.hasExistenceOnly()) {
|
||||
setExistenceOnly(other.getExistenceOnly());
|
||||
}
|
||||
if (other.hasClosestRowBefore()) {
|
||||
setClosestRowBefore(other.getClosestRowBefore());
|
||||
}
|
||||
if (other.hasConsistency()) {
|
||||
setConsistency(other.getConsistency());
|
||||
}
|
||||
|
@ -4029,66 +3950,13 @@ public final class ClientProtos {
|
|||
return this;
|
||||
}
|
||||
|
||||
// optional bool closest_row_before = 11 [default = false];
|
||||
private boolean closestRowBefore_ ;
|
||||
/**
|
||||
* <code>optional bool closest_row_before = 11 [default = false];</code>
|
||||
*
|
||||
* <pre>
|
||||
* If the row to get doesn't exist, return the
|
||||
* closest row before.
|
||||
* </pre>
|
||||
*/
|
||||
public boolean hasClosestRowBefore() {
|
||||
return ((bitField0_ & 0x00000400) == 0x00000400);
|
||||
}
|
||||
/**
|
||||
* <code>optional bool closest_row_before = 11 [default = false];</code>
|
||||
*
|
||||
* <pre>
|
||||
* If the row to get doesn't exist, return the
|
||||
* closest row before.
|
||||
* </pre>
|
||||
*/
|
||||
public boolean getClosestRowBefore() {
|
||||
return closestRowBefore_;
|
||||
}
|
||||
/**
|
||||
* <code>optional bool closest_row_before = 11 [default = false];</code>
|
||||
*
|
||||
* <pre>
|
||||
* If the row to get doesn't exist, return the
|
||||
* closest row before.
|
||||
* </pre>
|
||||
*/
|
||||
public Builder setClosestRowBefore(boolean value) {
|
||||
bitField0_ |= 0x00000400;
|
||||
closestRowBefore_ = value;
|
||||
onChanged();
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* <code>optional bool closest_row_before = 11 [default = false];</code>
|
||||
*
|
||||
* <pre>
|
||||
* If the row to get doesn't exist, return the
|
||||
* closest row before.
|
||||
* </pre>
|
||||
*/
|
||||
public Builder clearClosestRowBefore() {
|
||||
bitField0_ = (bitField0_ & ~0x00000400);
|
||||
closestRowBefore_ = false;
|
||||
onChanged();
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional .hbase.pb.Consistency consistency = 12 [default = STRONG];
|
||||
private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
|
||||
/**
|
||||
* <code>optional .hbase.pb.Consistency consistency = 12 [default = STRONG];</code>
|
||||
*/
|
||||
public boolean hasConsistency() {
|
||||
return ((bitField0_ & 0x00000800) == 0x00000800);
|
||||
return ((bitField0_ & 0x00000400) == 0x00000400);
|
||||
}
|
||||
/**
|
||||
* <code>optional .hbase.pb.Consistency consistency = 12 [default = STRONG];</code>
|
||||
|
@ -4103,7 +3971,7 @@ public final class ClientProtos {
|
|||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
bitField0_ |= 0x00000800;
|
||||
bitField0_ |= 0x00000400;
|
||||
consistency_ = value;
|
||||
onChanged();
|
||||
return this;
|
||||
|
@ -4112,7 +3980,7 @@ public final class ClientProtos {
|
|||
* <code>optional .hbase.pb.Consistency consistency = 12 [default = STRONG];</code>
|
||||
*/
|
||||
public Builder clearConsistency() {
|
||||
bitField0_ = (bitField0_ & ~0x00000800);
|
||||
bitField0_ = (bitField0_ & ~0x00000400);
|
||||
consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
|
||||
onChanged();
|
||||
return this;
|
||||
|
@ -33253,135 +33121,134 @@ public final class ClientProtos {
|
|||
"o\032\017MapReduce.proto\"\037\n\016Authorizations\022\r\n\005" +
|
||||
"label\030\001 \003(\t\"$\n\016CellVisibility\022\022\n\nexpress" +
|
||||
"ion\030\001 \002(\t\"+\n\006Column\022\016\n\006family\030\001 \002(\014\022\021\n\tq" +
|
||||
"ualifier\030\002 \003(\014\"\201\003\n\003Get\022\013\n\003row\030\001 \002(\014\022 \n\006c" +
|
||||
"ualifier\030\002 \003(\014\"\336\002\n\003Get\022\013\n\003row\030\001 \002(\014\022 \n\006c" +
|
||||
"olumn\030\002 \003(\0132\020.hbase.pb.Column\022*\n\tattribu" +
|
||||
"te\030\003 \003(\0132\027.hbase.pb.NameBytesPair\022 \n\006fil" +
|
||||
"ter\030\004 \001(\0132\020.hbase.pb.Filter\022\'\n\ntime_rang" +
|
||||
"e\030\005 \001(\0132\023.hbase.pb.TimeRange\022\027\n\014max_vers",
|
||||
"ions\030\006 \001(\r:\0011\022\032\n\014cache_blocks\030\007 \001(\010:\004tru" +
|
||||
"e\022\023\n\013store_limit\030\010 \001(\r\022\024\n\014store_offset\030\t" +
|
||||
" \001(\r\022\035\n\016existence_only\030\n \001(\010:\005false\022!\n\022c" +
|
||||
"losest_row_before\030\013 \001(\010:\005false\0222\n\013consis" +
|
||||
"tency\030\014 \001(\0162\025.hbase.pb.Consistency:\006STRO" +
|
||||
"NG\"\203\001\n\006Result\022\034\n\004cell\030\001 \003(\0132\016.hbase.pb.C" +
|
||||
"ell\022\035\n\025associated_cell_count\030\002 \001(\005\022\016\n\006ex" +
|
||||
"ists\030\003 \001(\010\022\024\n\005stale\030\004 \001(\010:\005false\022\026\n\007part" +
|
||||
"ial\030\005 \001(\010:\005false\"S\n\nGetRequest\022)\n\006region" +
|
||||
"\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\032\n\003get",
|
||||
"\030\002 \002(\0132\r.hbase.pb.Get\"/\n\013GetResponse\022 \n\006" +
|
||||
"result\030\001 \001(\0132\020.hbase.pb.Result\"\222\001\n\tCondi" +
|
||||
"tion\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002(\014\022\021\n\tqua" +
|
||||
"lifier\030\003 \002(\014\022+\n\014compare_type\030\004 \002(\0162\025.hba" +
|
||||
"se.pb.CompareType\022(\n\ncomparator\030\005 \002(\0132\024." +
|
||||
"hbase.pb.Comparator\"\364\006\n\rMutationProto\022\013\n" +
|
||||
"\003row\030\001 \001(\014\0229\n\013mutate_type\030\002 \001(\0162$.hbase." +
|
||||
"pb.MutationProto.MutationType\0229\n\014column_" +
|
||||
"value\030\003 \003(\0132#.hbase.pb.MutationProto.Col" +
|
||||
"umnValue\022\021\n\ttimestamp\030\004 \001(\004\022*\n\tattribute",
|
||||
"\030\005 \003(\0132\027.hbase.pb.NameBytesPair\022C\n\ndurab" +
|
||||
"ility\030\006 \001(\0162\".hbase.pb.MutationProto.Dur" +
|
||||
"ability:\013USE_DEFAULT\022\'\n\ntime_range\030\007 \001(\013" +
|
||||
"2\023.hbase.pb.TimeRange\022\035\n\025associated_cell" +
|
||||
"_count\030\010 \001(\005\022\r\n\005nonce\030\t \001(\004\032\371\001\n\013ColumnVa" +
|
||||
"lue\022\016\n\006family\030\001 \002(\014\022K\n\017qualifier_value\030\002" +
|
||||
" \003(\01322.hbase.pb.MutationProto.ColumnValu" +
|
||||
"e.QualifierValue\032\214\001\n\016QualifierValue\022\021\n\tq" +
|
||||
"ualifier\030\001 \001(\014\022\r\n\005value\030\002 \001(\014\022\021\n\ttimesta" +
|
||||
"mp\030\003 \001(\004\0227\n\013delete_type\030\004 \001(\0162\".hbase.pb",
|
||||
".MutationProto.DeleteType\022\014\n\004tags\030\005 \001(\014\"" +
|
||||
"W\n\nDurability\022\017\n\013USE_DEFAULT\020\000\022\014\n\010SKIP_W" +
|
||||
"AL\020\001\022\r\n\tASYNC_WAL\020\002\022\014\n\010SYNC_WAL\020\003\022\r\n\tFSY" +
|
||||
"NC_WAL\020\004\">\n\014MutationType\022\n\n\006APPEND\020\000\022\r\n\t" +
|
||||
"INCREMENT\020\001\022\007\n\003PUT\020\002\022\n\n\006DELETE\020\003\"p\n\nDele" +
|
||||
"teType\022\026\n\022DELETE_ONE_VERSION\020\000\022\034\n\030DELETE" +
|
||||
"_MULTIPLE_VERSIONS\020\001\022\021\n\rDELETE_FAMILY\020\002\022" +
|
||||
"\031\n\025DELETE_FAMILY_VERSION\020\003\"\242\001\n\rMutateReq" +
|
||||
"uest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSp" +
|
||||
"ecifier\022)\n\010mutation\030\002 \002(\0132\027.hbase.pb.Mut",
|
||||
"ationProto\022&\n\tcondition\030\003 \001(\0132\023.hbase.pb" +
|
||||
".Condition\022\023\n\013nonce_group\030\004 \001(\004\"E\n\016Mutat" +
|
||||
"eResponse\022 \n\006result\030\001 \001(\0132\020.hbase.pb.Res" +
|
||||
"ult\022\021\n\tprocessed\030\002 \001(\010\"\346\003\n\004Scan\022 \n\006colum" +
|
||||
"n\030\001 \003(\0132\020.hbase.pb.Column\022*\n\tattribute\030\002" +
|
||||
" \003(\0132\027.hbase.pb.NameBytesPair\022\021\n\tstart_r" +
|
||||
"ow\030\003 \001(\014\022\020\n\010stop_row\030\004 \001(\014\022 \n\006filter\030\005 \001" +
|
||||
"(\0132\020.hbase.pb.Filter\022\'\n\ntime_range\030\006 \001(\013" +
|
||||
"2\023.hbase.pb.TimeRange\022\027\n\014max_versions\030\007 " +
|
||||
"\001(\r:\0011\022\032\n\014cache_blocks\030\010 \001(\010:\004true\022\022\n\nba",
|
||||
"tch_size\030\t \001(\r\022\027\n\017max_result_size\030\n \001(\004\022" +
|
||||
"\023\n\013store_limit\030\013 \001(\r\022\024\n\014store_offset\030\014 \001" +
|
||||
"(\r\022&\n\036load_column_families_on_demand\030\r \001" +
|
||||
"(\010\022\r\n\005small\030\016 \001(\010\022\027\n\010reversed\030\017 \001(\010:\005fal" +
|
||||
"se\0222\n\013consistency\030\020 \001(\0162\025.hbase.pb.Consi" +
|
||||
"stency:\006STRONG\022\017\n\007caching\030\021 \001(\r\"\220\002\n\013Scan" +
|
||||
"Request\022)\n\006region\030\001 \001(\0132\031.hbase.pb.Regio" +
|
||||
"nSpecifier\022\034\n\004scan\030\002 \001(\0132\016.hbase.pb.Scan" +
|
||||
"\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016number_of_rows\030\004" +
|
||||
" \001(\r\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n\rnext_call",
|
||||
"_seq\030\006 \001(\004\022\037\n\027client_handles_partials\030\007 " +
|
||||
"\001(\010\022!\n\031client_handles_heartbeats\030\010 \001(\010\022\032" +
|
||||
"\n\022track_scan_metrics\030\t \001(\010\"\232\002\n\014ScanRespo" +
|
||||
"nse\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n\nscanner" +
|
||||
"_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003ttl\030\004" +
|
||||
" \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase.pb.Result\022" +
|
||||
"\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_resul" +
|
||||
"t\030\007 \003(\010\022\036\n\026more_results_in_region\030\010 \001(\010\022" +
|
||||
"\031\n\021heartbeat_message\030\t \001(\010\022+\n\014scan_metri" +
|
||||
"cs\030\n \001(\0132\025.hbase.pb.ScanMetrics\"\305\001\n\024Bulk",
|
||||
"LoadHFileRequest\022)\n\006region\030\001 \002(\0132\031.hbase" +
|
||||
".pb.RegionSpecifier\022>\n\013family_path\030\002 \003(\013" +
|
||||
"2).hbase.pb.BulkLoadHFileRequest.FamilyP" +
|
||||
"ath\022\026\n\016assign_seq_num\030\003 \001(\010\032*\n\nFamilyPat" +
|
||||
"h\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkL" +
|
||||
"oadHFileResponse\022\016\n\006loaded\030\001 \002(\010\"a\n\026Copr" +
|
||||
"ocessorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014servi" +
|
||||
"ce_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022\017\n\007re" +
|
||||
"quest\030\004 \002(\014\"B\n\030CoprocessorServiceResult\022" +
|
||||
"&\n\005value\030\001 \001(\0132\027.hbase.pb.NameBytesPair\"",
|
||||
"v\n\031CoprocessorServiceRequest\022)\n\006region\030\001" +
|
||||
" \002(\0132\031.hbase.pb.RegionSpecifier\022.\n\004call\030" +
|
||||
"\002 \002(\0132 .hbase.pb.CoprocessorServiceCall\"" +
|
||||
"o\n\032CoprocessorServiceResponse\022)\n\006region\030" +
|
||||
"\001 \002(\0132\031.hbase.pb.RegionSpecifier\022&\n\005valu" +
|
||||
"e\030\002 \002(\0132\027.hbase.pb.NameBytesPair\"\226\001\n\006Act" +
|
||||
"ion\022\r\n\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001(\0132\027.h" +
|
||||
"base.pb.MutationProto\022\032\n\003get\030\003 \001(\0132\r.hba" +
|
||||
"se.pb.Get\0226\n\014service_call\030\004 \001(\0132 .hbase." +
|
||||
"pb.CoprocessorServiceCall\"k\n\014RegionActio",
|
||||
"n\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpeci" +
|
||||
"fier\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(\0132\020.h" +
|
||||
"base.pb.Action\"D\n\017RegionLoadStats\022\027\n\014mem" +
|
||||
"storeLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy\030\002 \001(" +
|
||||
"\005:\0010\"\332\001\n\021ResultOrException\022\r\n\005index\030\001 \001(" +
|
||||
"\r\022 \n\006result\030\002 \001(\0132\020.hbase.pb.Result\022*\n\te" +
|
||||
"xception\030\003 \001(\0132\027.hbase.pb.NameBytesPair\022" +
|
||||
":\n\016service_result\030\004 \001(\0132\".hbase.pb.Copro" +
|
||||
"cessorServiceResult\022,\n\tloadStats\030\005 \001(\0132\031" +
|
||||
".hbase.pb.RegionLoadStats\"x\n\022RegionActio",
|
||||
"nResult\0226\n\021resultOrException\030\001 \003(\0132\033.hba" +
|
||||
"se.pb.ResultOrException\022*\n\texception\030\002 \001" +
|
||||
"(\0132\027.hbase.pb.NameBytesPair\"x\n\014MultiRequ" +
|
||||
"est\022,\n\014regionAction\030\001 \003(\0132\026.hbase.pb.Reg" +
|
||||
"ionAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tconditi" +
|
||||
"on\030\003 \001(\0132\023.hbase.pb.Condition\"\\\n\rMultiRe" +
|
||||
"sponse\0228\n\022regionActionResult\030\001 \003(\0132\034.hba" +
|
||||
"se.pb.RegionActionResult\022\021\n\tprocessed\030\002 " +
|
||||
"\001(\010*\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010TIMELI" +
|
||||
"NE\020\0012\203\004\n\rClientService\0222\n\003Get\022\024.hbase.pb",
|
||||
".GetRequest\032\025.hbase.pb.GetResponse\022;\n\006Mu" +
|
||||
"tate\022\027.hbase.pb.MutateRequest\032\030.hbase.pb" +
|
||||
".MutateResponse\0225\n\004Scan\022\025.hbase.pb.ScanR" +
|
||||
"equest\032\026.hbase.pb.ScanResponse\022P\n\rBulkLo" +
|
||||
"adHFile\022\036.hbase.pb.BulkLoadHFileRequest\032" +
|
||||
"\037.hbase.pb.BulkLoadHFileResponse\022X\n\013Exec" +
|
||||
"Service\022#.hbase.pb.CoprocessorServiceReq" +
|
||||
"uest\032$.hbase.pb.CoprocessorServiceRespon" +
|
||||
"se\022d\n\027ExecRegionServerService\022#.hbase.pb" +
|
||||
".CoprocessorServiceRequest\032$.hbase.pb.Co",
|
||||
"processorServiceResponse\0228\n\005Multi\022\026.hbas" +
|
||||
"e.pb.MultiRequest\032\027.hbase.pb.MultiRespon" +
|
||||
"seBB\n*org.apache.hadoop.hbase.protobuf.g" +
|
||||
"eneratedB\014ClientProtosH\001\210\001\001\240\001\001"
|
||||
" \001(\r\022\035\n\016existence_only\030\n \001(\010:\005false\0222\n\013c" +
|
||||
"onsistency\030\014 \001(\0162\025.hbase.pb.Consistency:" +
|
||||
"\006STRONG\"\203\001\n\006Result\022\034\n\004cell\030\001 \003(\0132\016.hbase" +
|
||||
".pb.Cell\022\035\n\025associated_cell_count\030\002 \001(\005\022" +
|
||||
"\016\n\006exists\030\003 \001(\010\022\024\n\005stale\030\004 \001(\010:\005false\022\026\n" +
|
||||
"\007partial\030\005 \001(\010:\005false\"S\n\nGetRequest\022)\n\006r" +
|
||||
"egion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\032" +
|
||||
"\n\003get\030\002 \002(\0132\r.hbase.pb.Get\"/\n\013GetRespons",
|
||||
"e\022 \n\006result\030\001 \001(\0132\020.hbase.pb.Result\"\222\001\n\t" +
|
||||
"Condition\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002(\014\022\021" +
|
||||
"\n\tqualifier\030\003 \002(\014\022+\n\014compare_type\030\004 \002(\0162" +
|
||||
"\025.hbase.pb.CompareType\022(\n\ncomparator\030\005 \002" +
|
||||
"(\0132\024.hbase.pb.Comparator\"\364\006\n\rMutationPro" +
|
||||
"to\022\013\n\003row\030\001 \001(\014\0229\n\013mutate_type\030\002 \001(\0162$.h" +
|
||||
"base.pb.MutationProto.MutationType\0229\n\014co" +
|
||||
"lumn_value\030\003 \003(\0132#.hbase.pb.MutationProt" +
|
||||
"o.ColumnValue\022\021\n\ttimestamp\030\004 \001(\004\022*\n\tattr" +
|
||||
"ibute\030\005 \003(\0132\027.hbase.pb.NameBytesPair\022C\n\n",
|
||||
"durability\030\006 \001(\0162\".hbase.pb.MutationProt" +
|
||||
"o.Durability:\013USE_DEFAULT\022\'\n\ntime_range\030" +
|
||||
"\007 \001(\0132\023.hbase.pb.TimeRange\022\035\n\025associated" +
|
||||
"_cell_count\030\010 \001(\005\022\r\n\005nonce\030\t \001(\004\032\371\001\n\013Col" +
|
||||
"umnValue\022\016\n\006family\030\001 \002(\014\022K\n\017qualifier_va" +
|
||||
"lue\030\002 \003(\01322.hbase.pb.MutationProto.Colum" +
|
||||
"nValue.QualifierValue\032\214\001\n\016QualifierValue" +
|
||||
"\022\021\n\tqualifier\030\001 \001(\014\022\r\n\005value\030\002 \001(\014\022\021\n\tti" +
|
||||
"mestamp\030\003 \001(\004\0227\n\013delete_type\030\004 \001(\0162\".hba" +
|
||||
"se.pb.MutationProto.DeleteType\022\014\n\004tags\030\005",
|
||||
" \001(\014\"W\n\nDurability\022\017\n\013USE_DEFAULT\020\000\022\014\n\010S" +
|
||||
"KIP_WAL\020\001\022\r\n\tASYNC_WAL\020\002\022\014\n\010SYNC_WAL\020\003\022\r" +
|
||||
"\n\tFSYNC_WAL\020\004\">\n\014MutationType\022\n\n\006APPEND\020" +
|
||||
"\000\022\r\n\tINCREMENT\020\001\022\007\n\003PUT\020\002\022\n\n\006DELETE\020\003\"p\n" +
|
||||
"\nDeleteType\022\026\n\022DELETE_ONE_VERSION\020\000\022\034\n\030D" +
|
||||
"ELETE_MULTIPLE_VERSIONS\020\001\022\021\n\rDELETE_FAMI" +
|
||||
"LY\020\002\022\031\n\025DELETE_FAMILY_VERSION\020\003\"\242\001\n\rMuta" +
|
||||
"teRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Reg" +
|
||||
"ionSpecifier\022)\n\010mutation\030\002 \002(\0132\027.hbase.p" +
|
||||
"b.MutationProto\022&\n\tcondition\030\003 \001(\0132\023.hba",
|
||||
"se.pb.Condition\022\023\n\013nonce_group\030\004 \001(\004\"E\n\016" +
|
||||
"MutateResponse\022 \n\006result\030\001 \001(\0132\020.hbase.p" +
|
||||
"b.Result\022\021\n\tprocessed\030\002 \001(\010\"\346\003\n\004Scan\022 \n\006" +
|
||||
"column\030\001 \003(\0132\020.hbase.pb.Column\022*\n\tattrib" +
|
||||
"ute\030\002 \003(\0132\027.hbase.pb.NameBytesPair\022\021\n\tst" +
|
||||
"art_row\030\003 \001(\014\022\020\n\010stop_row\030\004 \001(\014\022 \n\006filte" +
|
||||
"r\030\005 \001(\0132\020.hbase.pb.Filter\022\'\n\ntime_range\030" +
|
||||
"\006 \001(\0132\023.hbase.pb.TimeRange\022\027\n\014max_versio" +
|
||||
"ns\030\007 \001(\r:\0011\022\032\n\014cache_blocks\030\010 \001(\010:\004true\022" +
|
||||
"\022\n\nbatch_size\030\t \001(\r\022\027\n\017max_result_size\030\n",
|
||||
" \001(\004\022\023\n\013store_limit\030\013 \001(\r\022\024\n\014store_offse" +
|
||||
"t\030\014 \001(\r\022&\n\036load_column_families_on_deman" +
|
||||
"d\030\r \001(\010\022\r\n\005small\030\016 \001(\010\022\027\n\010reversed\030\017 \001(\010" +
|
||||
":\005false\0222\n\013consistency\030\020 \001(\0162\025.hbase.pb." +
|
||||
"Consistency:\006STRONG\022\017\n\007caching\030\021 \001(\r\"\220\002\n" +
|
||||
"\013ScanRequest\022)\n\006region\030\001 \001(\0132\031.hbase.pb." +
|
||||
"RegionSpecifier\022\034\n\004scan\030\002 \001(\0132\016.hbase.pb" +
|
||||
".Scan\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016number_of_r" +
|
||||
"ows\030\004 \001(\r\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n\rnext" +
|
||||
"_call_seq\030\006 \001(\004\022\037\n\027client_handles_partia",
|
||||
"ls\030\007 \001(\010\022!\n\031client_handles_heartbeats\030\010 " +
|
||||
"\001(\010\022\032\n\022track_scan_metrics\030\t \001(\010\"\232\002\n\014Scan" +
|
||||
"Response\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n\nsc" +
|
||||
"anner_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003" +
|
||||
"ttl\030\004 \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase.pb.Re" +
|
||||
"sult\022\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_" +
|
||||
"result\030\007 \003(\010\022\036\n\026more_results_in_region\030\010" +
|
||||
" \001(\010\022\031\n\021heartbeat_message\030\t \001(\010\022+\n\014scan_" +
|
||||
"metrics\030\n \001(\0132\025.hbase.pb.ScanMetrics\"\305\001\n" +
|
||||
"\024BulkLoadHFileRequest\022)\n\006region\030\001 \002(\0132\031.",
|
||||
"hbase.pb.RegionSpecifier\022>\n\013family_path\030" +
|
||||
"\002 \003(\0132).hbase.pb.BulkLoadHFileRequest.Fa" +
|
||||
"milyPath\022\026\n\016assign_seq_num\030\003 \001(\010\032*\n\nFami" +
|
||||
"lyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025" +
|
||||
"BulkLoadHFileResponse\022\016\n\006loaded\030\001 \002(\010\"a\n" +
|
||||
"\026CoprocessorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014" +
|
||||
"service_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022" +
|
||||
"\017\n\007request\030\004 \002(\014\"B\n\030CoprocessorServiceRe" +
|
||||
"sult\022&\n\005value\030\001 \001(\0132\027.hbase.pb.NameBytes" +
|
||||
"Pair\"v\n\031CoprocessorServiceRequest\022)\n\006reg",
|
||||
"ion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022.\n\004" +
|
||||
"call\030\002 \002(\0132 .hbase.pb.CoprocessorService" +
|
||||
"Call\"o\n\032CoprocessorServiceResponse\022)\n\006re" +
|
||||
"gion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022&\n" +
|
||||
"\005value\030\002 \002(\0132\027.hbase.pb.NameBytesPair\"\226\001" +
|
||||
"\n\006Action\022\r\n\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001(" +
|
||||
"\0132\027.hbase.pb.MutationProto\022\032\n\003get\030\003 \001(\0132" +
|
||||
"\r.hbase.pb.Get\0226\n\014service_call\030\004 \001(\0132 .h" +
|
||||
"base.pb.CoprocessorServiceCall\"k\n\014Region" +
|
||||
"Action\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Region",
|
||||
"Specifier\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(" +
|
||||
"\0132\020.hbase.pb.Action\"D\n\017RegionLoadStats\022\027" +
|
||||
"\n\014memstoreLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy" +
|
||||
"\030\002 \001(\005:\0010\"\332\001\n\021ResultOrException\022\r\n\005index" +
|
||||
"\030\001 \001(\r\022 \n\006result\030\002 \001(\0132\020.hbase.pb.Result" +
|
||||
"\022*\n\texception\030\003 \001(\0132\027.hbase.pb.NameBytes" +
|
||||
"Pair\022:\n\016service_result\030\004 \001(\0132\".hbase.pb." +
|
||||
"CoprocessorServiceResult\022,\n\tloadStats\030\005 " +
|
||||
"\001(\0132\031.hbase.pb.RegionLoadStats\"x\n\022Region" +
|
||||
"ActionResult\0226\n\021resultOrException\030\001 \003(\0132",
|
||||
"\033.hbase.pb.ResultOrException\022*\n\texceptio" +
|
||||
"n\030\002 \001(\0132\027.hbase.pb.NameBytesPair\"x\n\014Mult" +
|
||||
"iRequest\022,\n\014regionAction\030\001 \003(\0132\026.hbase.p" +
|
||||
"b.RegionAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tco" +
|
||||
"ndition\030\003 \001(\0132\023.hbase.pb.Condition\"\\\n\rMu" +
|
||||
"ltiResponse\0228\n\022regionActionResult\030\001 \003(\0132" +
|
||||
"\034.hbase.pb.RegionActionResult\022\021\n\tprocess" +
|
||||
"ed\030\002 \001(\010*\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010T" +
|
||||
"IMELINE\020\0012\203\004\n\rClientService\0222\n\003Get\022\024.hba" +
|
||||
"se.pb.GetRequest\032\025.hbase.pb.GetResponse\022",
|
||||
";\n\006Mutate\022\027.hbase.pb.MutateRequest\032\030.hba" +
|
||||
"se.pb.MutateResponse\0225\n\004Scan\022\025.hbase.pb." +
|
||||
"ScanRequest\032\026.hbase.pb.ScanResponse\022P\n\rB" +
|
||||
"ulkLoadHFile\022\036.hbase.pb.BulkLoadHFileReq" +
|
||||
"uest\032\037.hbase.pb.BulkLoadHFileResponse\022X\n" +
|
||||
"\013ExecService\022#.hbase.pb.CoprocessorServi" +
|
||||
"ceRequest\032$.hbase.pb.CoprocessorServiceR" +
|
||||
"esponse\022d\n\027ExecRegionServerService\022#.hba" +
|
||||
"se.pb.CoprocessorServiceRequest\032$.hbase." +
|
||||
"pb.CoprocessorServiceResponse\0228\n\005Multi\022\026",
|
||||
".hbase.pb.MultiRequest\032\027.hbase.pb.MultiR" +
|
||||
"esponseBB\n*org.apache.hadoop.hbase.proto" +
|
||||
"buf.generatedB\014ClientProtosH\001\210\001\001\240\001\001"
|
||||
};
|
||||
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
||||
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
||||
|
@ -33411,7 +33278,7 @@ public final class ClientProtos {
|
|||
internal_static_hbase_pb_Get_fieldAccessorTable = new
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
||||
internal_static_hbase_pb_Get_descriptor,
|
||||
new java.lang.String[] { "Row", "Column", "Attribute", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "StoreLimit", "StoreOffset", "ExistenceOnly", "ClosestRowBefore", "Consistency", });
|
||||
new java.lang.String[] { "Row", "Column", "Attribute", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "StoreLimit", "StoreOffset", "ExistenceOnly", "Consistency", });
|
||||
internal_static_hbase_pb_Result_descriptor =
|
||||
getDescriptor().getMessageTypes().get(4);
|
||||
internal_static_hbase_pb_Result_fieldAccessorTable = new
|
||||
|
|
|
@@ -64,8 +64,7 @@ enum Consistency {
/**
 * The protocol buffer version of Get.
 * Unless existence_only is specified, return all the requested data
 * for the row that matches exactly, or the one that immediately
 * precedes it if closest_row_before is specified.
 * for the row that matches exactly.
 */
message Get {
  required bytes row = 1;
@@ -82,10 +81,6 @@ message Get {
  // the existence.
  optional bool existence_only = 10 [default = false];

  // If the row to get doesn't exist, return the
  // closest row before.
  optional bool closest_row_before = 11 [default = false];

  optional Consistency consistency = 12 [default = STRONG];
}

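After this change a Get message on the wire is just the row, its columns and the optional read options. A minimal sketch of building one with the generated classes; row and family are placeholder byte arrays, and ByteStringer is the wrapper already used by RequestConverter:

import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.util.ByteStringer;

// Hypothetical illustration: the proto Get no longer carries closest_row_before.
public final class ProtoGetExample {
  public static ClientProtos.Get buildProtoGet(byte[] row, byte[] family) {
    ClientProtos.Column column = ClientProtos.Column.newBuilder()
        .setFamily(ByteStringer.wrap(family))
        .build();
    return ClientProtos.Get.newBuilder()
        .setRow(ByteStringer.wrap(row))
        .addColumn(column)
        .build();
  }
}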
@@ -675,10 +675,6 @@ public class RemoteHTable implements Table {
    return true;
  }

  public Result getRowOrBefore(byte[] row, byte[] family) throws IOException {
    throw new IOException("getRowOrBefore not supported");
  }

  @Override
  public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
      byte[] value, Put put) throws IOException {
@@ -115,20 +115,6 @@ public final class HTableWrapper implements Table {
    }
  }

  /**
   * @deprecated in 0.99 since setting clearBufferOnFail is deprecated.
   */
  @Deprecated
  public Result getRowOrBefore(byte[] row, byte[] family)
      throws IOException {
    Scan scan = Scan.createGetClosestRowOrBeforeReverseScan(row);
    Result startRowResult = null;
    try (ResultScanner resultScanner = this.table.getScanner(scan)) {
      startRowResult = resultScanner.next();
    }
    return startRowResult;
  }

  public Result get(Get get) throws IOException {
    return table.get(get);
  }
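Coprocessor code that called HTableWrapper#getRowOrBefore can run the same reversed scan itself against a Table obtained from its environment. A hedged sketch; the class name and the env, tableName and row inputs are placeholders, and CoprocessorEnvironment#getTable(TableName) is the existing accessor:

import java.io.IOException;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

// Hypothetical replacement for the removed wrapper method, run from coprocessor code.
public final class CoprocessorRowOrBeforeExample {
  public static Result rowOrBefore(CoprocessorEnvironment env, TableName tableName, byte[] row)
      throws IOException {
    Scan scan = new Scan(row);
    scan.setReversed(true);
    scan.setSmall(true);
    scan.setCaching(1);
    try (Table table = env.getTable(tableName);
        ResultScanner scanner = table.getScanner(scan)) {
      return scanner.next();
    }
  }
}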
@@ -19,14 +19,14 @@ import java.io.IOException;
import java.util.List;
import java.util.NavigableSet;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
@@ -54,9 +54,9 @@ import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.wal.WALKey;

import com.google.common.collect.ImmutableList;

@@ -120,7 +120,7 @@ public abstract class BaseRegionObserver implements RegionObserver {
  @Override
  public void preSplit(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException {
  }


  @Override
  public void preSplit(ObserverContext<RegionCoprocessorEnvironment> c,
      byte[] splitRow) throws IOException {
@ -130,22 +130,22 @@ public abstract class BaseRegionObserver implements RegionObserver {
|
|||
public void preSplitBeforePONR(ObserverContext<RegionCoprocessorEnvironment> ctx,
|
||||
byte[] splitKey, List<Mutation> metaEntries) throws IOException {
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void preSplitAfterPONR(
|
||||
ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void preRollBackSplit(ObserverContext<RegionCoprocessorEnvironment> ctx)
|
||||
throws IOException {
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void postRollBackSplit(
|
||||
ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void postCompleteSplit(
|
||||
ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
|
||||
|
@@ -218,18 +218,6 @@ public abstract class BaseRegionObserver implements RegionObserver {
    postCompact(e, store, resultFile);
  }

  @Override
  public void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> e,
      final byte [] row, final byte [] family, final Result result)
      throws IOException {
  }

  @Override
  public void postGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> e,
      final byte [] row, final byte [] family, final Result result)
      throws IOException {
  }

  @Override
  public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
      final Get get, final List<Cell> results) throws IOException {
@ -253,12 +241,12 @@ public abstract class BaseRegionObserver implements RegionObserver {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e,
|
||||
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e,
|
||||
final Put put, final WALEdit edit, final Durability durability) throws IOException {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void postPut(final ObserverContext<RegionCoprocessorEnvironment> e,
|
||||
public void postPut(final ObserverContext<RegionCoprocessorEnvironment> e,
|
||||
final Put put, final WALEdit edit, final Durability durability) throws IOException {
|
||||
}
|
||||
|
||||
|
@ -278,7 +266,7 @@ public abstract class BaseRegionObserver implements RegionObserver {
|
|||
final Delete delete, final WALEdit edit, final Durability durability)
|
||||
throws IOException {
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void preBatchMutate(final ObserverContext<RegionCoprocessorEnvironment> c,
|
||||
final MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
|
||||
|
|
|
@ -382,7 +382,7 @@ public interface RegionObserver extends Coprocessor {
|
|||
void preSplitBeforePONR(final ObserverContext<RegionCoprocessorEnvironment> ctx,
|
||||
byte[] splitKey, List<Mutation> metaEntries) throws IOException;
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* This will be called after PONR step as part of split transaction
|
||||
* Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no
|
||||
|
@ -391,9 +391,9 @@ public interface RegionObserver extends Coprocessor {
|
|||
* @throws IOException
|
||||
*/
|
||||
void preSplitAfterPONR(final ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException;
|
||||
|
||||
|
||||
/**
|
||||
* This will be called before the roll back of the split region is completed
|
||||
* This will be called before the roll back of the split region is completed
|
||||
* @param ctx
|
||||
* @throws IOException
|
||||
*/
|
||||
|
@ -419,7 +419,7 @@ public interface RegionObserver extends Coprocessor {
|
|||
* Called before the region is reported as closed to the master.
|
||||
* @param c the environment provided by the region server
|
||||
* @param abortRequested true if the region server is aborting
|
||||
* @throws IOException
|
||||
* @throws IOException
|
||||
*/
|
||||
void preClose(final ObserverContext<RegionCoprocessorEnvironment> c,
|
||||
boolean abortRequested) throws IOException;
|
||||
|
@@ -432,40 +432,6 @@ public interface RegionObserver extends Coprocessor {
  void postClose(final ObserverContext<RegionCoprocessorEnvironment> c,
      boolean abortRequested);

  /**
   * Called before a client makes a GetClosestRowBefore request.
   * <p>
   * Call CoprocessorEnvironment#bypass to skip default actions
   * <p>
   * Call CoprocessorEnvironment#complete to skip any subsequent chained
   * coprocessors
   * @param c the environment provided by the region server
   * @param row the row
   * @param family the family
   * @param result The result to return to the client if default processing
   *   is bypassed. Can be modified. Will not be used if default processing
   *   is not bypassed.
   * @throws IOException if an error occurred on the coprocessor
   */
  void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
      final byte [] row, final byte [] family, final Result result)
      throws IOException;

  /**
   * Called after a client makes a GetClosestRowBefore request.
   * <p>
   * Call CoprocessorEnvironment#complete to skip any subsequent chained
   * coprocessors
   * @param c the environment provided by the region server
   * @param row the row
   * @param family the desired family
   * @param result the result to return to the client, modify as necessary
   * @throws IOException if an error occurred on the coprocessor
   */
  void postGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
      final byte [] row, final byte [] family, final Result result)
      throws IOException;

  /**
   * Called before the client performs a Get
   * <p>
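Observers that implemented the removed hooks no longer see a dedicated closest-row-before call; those requests now arrive as reversed scans. One possible migration, assuming the preScannerOpen hook of this interface; the class name and the isReversed/isSmall check are illustrative, not a prescribed pattern:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.RegionScanner;

// Hypothetical observer: logic that lived in preGetClosestRowBefore can watch the
// reversed scans that replace those requests.
public class ClosestRowBeforeObserver extends BaseRegionObserver {
  @Override
  public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
      Scan scan, RegionScanner s) throws IOException {
    if (scan.isReversed() && scan.isSmall()) {
      // handle what preGetClosestRowBefore used to handle
    }
    return s;
  }
}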
@ -543,7 +509,7 @@ public interface RegionObserver extends Coprocessor {
|
|||
* @param durability Persistence guarantee for this Put
|
||||
* @throws IOException if an error occurred on the coprocessor
|
||||
*/
|
||||
void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
|
||||
void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
|
||||
final Put put, final WALEdit edit, final Durability durability)
|
||||
throws IOException;
|
||||
|
||||
|
@ -558,7 +524,7 @@ public interface RegionObserver extends Coprocessor {
|
|||
* @param durability Persistence guarantee for this Put
|
||||
* @throws IOException if an error occurred on the coprocessor
|
||||
*/
|
||||
void postPut(final ObserverContext<RegionCoprocessorEnvironment> c,
|
||||
void postPut(final ObserverContext<RegionCoprocessorEnvironment> c,
|
||||
final Put put, final WALEdit edit, final Durability durability)
|
||||
throws IOException;
|
||||
|
||||
|
@ -575,7 +541,7 @@ public interface RegionObserver extends Coprocessor {
|
|||
* @param durability Persistence guarantee for this Delete
|
||||
* @throws IOException if an error occurred on the coprocessor
|
||||
*/
|
||||
void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
|
||||
void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
|
||||
final Delete delete, final WALEdit edit, final Durability durability)
|
||||
throws IOException;
|
||||
/**
|
||||
|
@ -611,7 +577,7 @@ public interface RegionObserver extends Coprocessor {
|
|||
void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
|
||||
final Delete delete, final WALEdit edit, final Durability durability)
|
||||
throws IOException;
|
||||
|
||||
|
||||
/**
|
||||
* This will be called for every batch mutation operation happening at the server. This will be
|
||||
* called after acquiring the locks on the mutating rows and after applying the proper timestamp
|
||||
|
@ -658,7 +624,7 @@ public interface RegionObserver extends Coprocessor {
|
|||
* Called after the completion of batch put/delete and will be called even if the batch operation
|
||||
* fails
|
||||
* @param ctx
|
||||
* @param miniBatchOp
|
||||
* @param miniBatchOp
|
||||
* @param success true if batch operation is successful otherwise false.
|
||||
* @throws IOException
|
||||
*/
|
||||
|
@ -679,7 +645,7 @@ public interface RegionObserver extends Coprocessor {
|
|||
* @param compareOp the comparison operation
|
||||
* @param comparator the comparator
|
||||
* @param put data to put if check succeeds
|
||||
* @param result
|
||||
* @param result
|
||||
* @return the return value to return to client if bypassing default
|
||||
* processing
|
||||
* @throws IOException if an error occurred on the coprocessor
|
||||
|
@ -693,8 +659,8 @@ public interface RegionObserver extends Coprocessor {
|
|||
/**
|
||||
* Called before checkAndPut but after acquiring rowlock.
|
||||
* <p>
|
||||
* <b>Note:</b> Caution to be taken for not doing any long time operation in this hook.
|
||||
* Row will be locked for longer time. Trying to acquire lock on another row, within this,
|
||||
* <b>Note:</b> Caution to be taken for not doing any long time operation in this hook.
|
||||
* Row will be locked for longer time. Trying to acquire lock on another row, within this,
|
||||
* can lead to potential deadlock.
|
||||
* <p>
|
||||
* Call CoprocessorEnvironment#bypass to skip default actions
|
||||
|
@ -708,14 +674,14 @@ public interface RegionObserver extends Coprocessor {
|
|||
* @param compareOp the comparison operation
|
||||
* @param comparator the comparator
|
||||
* @param put data to put if check succeeds
|
||||
* @param result
|
||||
* @param result
|
||||
* @return the return value to return to client if bypassing default
|
||||
* processing
|
||||
* @throws IOException if an error occurred on the coprocessor
|
||||
*/
|
||||
boolean preCheckAndPutAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
|
||||
final byte[] row, final byte[] family, final byte[] qualifier, final CompareOp compareOp,
|
||||
final ByteArrayComparable comparator, final Put put,
|
||||
final ByteArrayComparable comparator, final Put put,
|
||||
final boolean result) throws IOException;
|
||||
|
||||
/**
|
||||
|
@ -754,7 +720,7 @@ public interface RegionObserver extends Coprocessor {
|
|||
* @param compareOp the comparison operation
|
||||
* @param comparator the comparator
|
||||
* @param delete delete to commit if check succeeds
|
||||
* @param result
|
||||
* @param result
|
||||
* @return the value to return to client if bypassing default processing
|
||||
* @throws IOException if an error occurred on the coprocessor
|
||||
*/
|
||||
|
@ -767,8 +733,8 @@ public interface RegionObserver extends Coprocessor {
|
|||
/**
|
||||
* Called before checkAndDelete but after acquiring rowock.
|
||||
* <p>
|
||||
* <b>Note:</b> Caution to be taken for not doing any long time operation in this hook.
|
||||
* Row will be locked for longer time. Trying to acquire lock on another row, within this,
|
||||
* <b>Note:</b> Caution to be taken for not doing any long time operation in this hook.
|
||||
* Row will be locked for longer time. Trying to acquire lock on another row, within this,
|
||||
* can lead to potential deadlock.
|
||||
* <p>
|
||||
* Call CoprocessorEnvironment#bypass to skip default actions
|
||||
|
@ -782,7 +748,7 @@ public interface RegionObserver extends Coprocessor {
|
|||
* @param compareOp the comparison operation
|
||||
* @param comparator the comparator
|
||||
* @param delete delete to commit if check succeeds
|
||||
* @param result
|
||||
* @param result
|
||||
* @return the value to return to client if bypassing default processing
|
||||
* @throws IOException if an error occurred on the coprocessor
|
||||
*/
|
||||
|
@ -877,8 +843,8 @@ public interface RegionObserver extends Coprocessor {
|
|||
/**
|
||||
* Called before Append but after acquiring rowlock.
|
||||
* <p>
|
||||
* <b>Note:</b> Caution to be taken for not doing any long time operation in this hook.
|
||||
* Row will be locked for longer time. Trying to acquire lock on another row, within this,
|
||||
* can lead to potential deadlock.
|
||||
* <p>
|
||||
* Call CoprocessorEnvironment#bypass to skip default actions
|
||||
|
@ -927,14 +893,14 @@ public interface RegionObserver extends Coprocessor {
|
|||
/**
|
||||
* Called before Increment but after acquiring rowlock.
|
||||
* <p>
|
||||
* <b>Note:</b> Caution to be taken for not doing any long time operation in this hook.
|
||||
* Row will be locked for longer time. Trying to acquire lock on another row, within this,
|
||||
* can lead to potential deadlock.
|
||||
* <p>
|
||||
* Call CoprocessorEnvironment#bypass to skip default actions
|
||||
* <p>
|
||||
* Call CoprocessorEnvironment#complete to skip any subsequent chained coprocessors
|
||||
*
|
||||
* @param c
|
||||
* the environment provided by the region server
|
||||
* @param increment
|
||||
|
@ -1227,7 +1193,7 @@ public interface RegionObserver extends Coprocessor {
|
|||
* Called before creation of Reader for a store file.
|
||||
* Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no
|
||||
* effect in this hook.
|
||||
*
|
||||
* @param ctx the environment provided by the region server
|
||||
* @param fs filesystem to read from
|
||||
* @param p path to the file
|
||||
|
@ -1246,7 +1212,7 @@ public interface RegionObserver extends Coprocessor {
|
|||
|
||||
/**
|
||||
* Called after the creation of Reader for a store file.
|
||||
*
|
||||
* @param ctx the environment provided by the region server
|
||||
* @param fs filesystem to read from
|
||||
* @param p path to the file
|
||||
|
|
|
@ -31,15 +31,14 @@ import java.util.concurrent.atomic.AtomicLong;
|
|||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hbase.Cell;
|
||||
import org.apache.hadoop.hbase.CellComparator;
|
||||
import org.apache.hadoop.hbase.CellUtil;
|
||||
import org.apache.hadoop.hbase.HBaseConfiguration;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.KeyValue;
|
||||
import org.apache.hadoop.hbase.KeyValueUtil;
|
||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hbase.client.Scan;
|
||||
import org.apache.hadoop.hbase.util.ByteRange;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
@ -384,85 +383,6 @@ public class DefaultMemStore implements MemStore {
|
|||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param state column/delete tracking state
|
||||
*/
|
||||
@Override
|
||||
public void getRowKeyAtOrBefore(final GetClosestRowBeforeTracker state) {
|
||||
getRowKeyAtOrBefore(cellSet, state);
|
||||
getRowKeyAtOrBefore(snapshot, state);
|
||||
}
|
||||
|
||||
/*
|
||||
* @param set
|
||||
* @param state Accumulates deletes and candidates.
|
||||
*/
|
||||
private void getRowKeyAtOrBefore(final NavigableSet<Cell> set,
|
||||
final GetClosestRowBeforeTracker state) {
|
||||
if (set.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
if (!walkForwardInSingleRow(set, state.getTargetKey(), state)) {
|
||||
// Found nothing in row. Try backing up.
|
||||
getRowKeyBefore(set, state);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Walk forward in a row from <code>firstOnRow</code>. Presumption is that
|
||||
* we have been passed the first possible key on a row. As we walk forward
|
||||
* we accumulate deletes until we hit a candidate on the row at which point
|
||||
* we return.
|
||||
* @param set
|
||||
* @param firstOnRow First possible key on this row.
|
||||
* @param state
|
||||
* @return True if we found a candidate walking this row.
|
||||
*/
|
||||
private boolean walkForwardInSingleRow(final SortedSet<Cell> set,
|
||||
final Cell firstOnRow, final GetClosestRowBeforeTracker state) {
|
||||
boolean foundCandidate = false;
|
||||
SortedSet<Cell> tail = set.tailSet(firstOnRow);
|
||||
if (tail.isEmpty()) return foundCandidate;
|
||||
for (Iterator<Cell> i = tail.iterator(); i.hasNext();) {
|
||||
Cell kv = i.next();
|
||||
// Did we go beyond the target row? If so break.
|
||||
if (state.isTooFar(kv, firstOnRow)) break;
|
||||
if (state.isExpired(kv)) {
|
||||
i.remove();
|
||||
continue;
|
||||
}
|
||||
// If we added something, this row is a contender. break.
|
||||
if (state.handle(kv)) {
|
||||
foundCandidate = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return foundCandidate;
|
||||
}
|
||||
|
||||
/*
|
||||
* Walk backwards through the passed set a row at a time until we run out of
|
||||
* set or until we get a candidate.
|
||||
* @param set
|
||||
* @param state
|
||||
*/
|
||||
private void getRowKeyBefore(NavigableSet<Cell> set,
|
||||
final GetClosestRowBeforeTracker state) {
|
||||
Cell firstOnRow = state.getTargetKey();
|
||||
for (Member p = memberOfPreviousRow(set, state, firstOnRow);
|
||||
p != null; p = memberOfPreviousRow(p.set, state, firstOnRow)) {
|
||||
// Make sure we don't fall out of our table.
|
||||
if (!state.isTargetTable(p.cell)) break;
|
||||
// Stop looking if we've exited the better candidate range.
|
||||
if (!state.isBetterCandidate(p.cell)) break;
|
||||
// Make into firstOnRow
|
||||
firstOnRow = new KeyValue(p.cell.getRowArray(), p.cell.getRowOffset(), p.cell.getRowLength(),
|
||||
HConstants.LATEST_TIMESTAMP);
|
||||
// If we find something, break;
|
||||
if (walkForwardInSingleRow(p.set, firstOnRow, state)) break;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Only used by tests. TODO: Remove
|
||||
*
|
||||
|
@ -622,42 +542,6 @@ public class DefaultMemStore implements MemStore {
|
|||
return addedSize;
|
||||
}
|
||||
|
||||
/*
|
||||
* Immutable data structure to hold member found in set and the set it was
|
||||
* found in. Include set because it is carrying context.
|
||||
*/
|
||||
private static class Member {
|
||||
final Cell cell;
|
||||
final NavigableSet<Cell> set;
|
||||
Member(final NavigableSet<Cell> s, final Cell kv) {
|
||||
this.cell = kv;
|
||||
this.set = s;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* @param set Set to walk back in. Pass a first in row or we'll return
|
||||
* same row (loop).
|
||||
* @param state Utility and context.
|
||||
* @param firstOnRow First item on the row after the one we want to find a
|
||||
* member in.
|
||||
* @return Null or member of row previous to <code>firstOnRow</code>
|
||||
*/
|
||||
private Member memberOfPreviousRow(NavigableSet<Cell> set,
|
||||
final GetClosestRowBeforeTracker state, final Cell firstOnRow) {
|
||||
NavigableSet<Cell> head = set.headSet(firstOnRow, false);
|
||||
if (head.isEmpty()) return null;
|
||||
for (Iterator<Cell> i = head.descendingIterator(); i.hasNext();) {
|
||||
Cell found = i.next();
|
||||
if (state.isExpired(found)) {
|
||||
i.remove();
|
||||
continue;
|
||||
}
|
||||
return new Member(head, found);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return scanner on memstore and snapshot in this order.
|
||||
*/
|
||||
|
|
|
@ -1,242 +0,0 @@
|
|||
/*
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hbase.regionserver;
|
||||
|
||||
import java.util.NavigableMap;
|
||||
import java.util.NavigableSet;
|
||||
import java.util.TreeMap;
|
||||
import java.util.TreeSet;
|
||||
|
||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hbase.Cell;
|
||||
import org.apache.hadoop.hbase.CellComparator;
|
||||
import org.apache.hadoop.hbase.CellUtil;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.KeyValue;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
/**
|
||||
* State and utility processing {@link HRegion#getClosestRowBefore(byte[], byte[])}.
|
||||
* Like {@link ScanQueryMatcher} and {@link ScanDeleteTracker} but does not
|
||||
* implement the {@link DeleteTracker} interface since state spans rows (There
|
||||
* is no update nor reset method).
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
class GetClosestRowBeforeTracker {
|
||||
private final KeyValue targetkey;
|
||||
// Any cell w/ a ts older than this is expired.
|
||||
private final long now;
|
||||
private final long oldestUnexpiredTs;
|
||||
private Cell candidate = null;
|
||||
private final CellComparator cellComparator;
|
||||
// Flag for whether we're doing getclosest on a metaregion.
|
||||
private final boolean metaregion;
|
||||
// Offset and length into targetkey demarking table name (if in a metaregion).
|
||||
private final int rowoffset;
|
||||
private final int tablenamePlusDelimiterLength;
|
||||
|
||||
// Deletes keyed by row. Comparator compares on row portion of KeyValue only.
|
||||
private final NavigableMap<Cell, NavigableSet<Cell>> deletes;
|
||||
|
||||
/**
|
||||
* @param c
|
||||
* @param kv Presume first on row: i.e. empty column, maximum timestamp and
|
||||
* a type of Type.Maximum
|
||||
* @param ttl Time to live in ms for this Store
|
||||
* @param metaregion True if this is hbase:meta or -ROOT- region.
|
||||
*/
|
||||
GetClosestRowBeforeTracker(final CellComparator c, final KeyValue kv,
|
||||
final long ttl, final boolean metaregion) {
|
||||
super();
|
||||
this.metaregion = metaregion;
|
||||
this.targetkey = kv;
|
||||
// If we are in a metaregion, then our table name is the prefix on the
|
||||
// targetkey.
|
||||
this.rowoffset = kv.getRowOffset();
|
||||
int l = -1;
|
||||
if (metaregion) {
|
||||
l = Bytes.searchDelimiterIndex(kv.getRowArray(), rowoffset, kv.getRowLength(),
|
||||
HConstants.DELIMITER) - this.rowoffset;
|
||||
}
|
||||
this.tablenamePlusDelimiterLength = metaregion? l + 1: -1;
|
||||
this.now = System.currentTimeMillis();
|
||||
this.oldestUnexpiredTs = now - ttl;
|
||||
this.cellComparator = c;
|
||||
this.deletes = new TreeMap<Cell, NavigableSet<Cell>>(new CellComparator.RowComparator());
|
||||
}
|
||||
|
||||
/*
|
||||
* Add the specified KeyValue to the list of deletes.
|
||||
* @param kv
|
||||
*/
|
||||
private void addDelete(final Cell kv) {
|
||||
NavigableSet<Cell> rowdeletes = this.deletes.get(kv);
|
||||
if (rowdeletes == null) {
|
||||
rowdeletes = new TreeSet<Cell>(this.cellComparator);
|
||||
this.deletes.put(kv, rowdeletes);
|
||||
}
|
||||
rowdeletes.add(kv);
|
||||
}
|
||||
|
||||
/*
|
||||
* @param kv Adds candidate if nearer the target than previous candidate.
|
||||
* @return True if updated candidate.
|
||||
*/
|
||||
private boolean addCandidate(final Cell kv) {
|
||||
if (!isDeleted(kv) && isBetterCandidate(kv)) {
|
||||
this.candidate = kv;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
boolean isBetterCandidate(final Cell contender) {
|
||||
return this.candidate == null ||
|
||||
(this.cellComparator.compareRows(this.candidate, contender) < 0 &&
|
||||
this.cellComparator.compareRows(contender, this.targetkey) <= 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if specified KeyValue buffer has been deleted by a previously
|
||||
* seen delete.
|
||||
* @param kv
|
||||
* @return true if the specified KeyValue is deleted, false if not
|
||||
*/
|
||||
private boolean isDeleted(final Cell kv) {
|
||||
if (this.deletes.isEmpty()) return false;
|
||||
NavigableSet<Cell> rowdeletes = this.deletes.get(kv);
|
||||
if (rowdeletes == null || rowdeletes.isEmpty()) return false;
|
||||
return isDeleted(kv, rowdeletes);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if the specified KeyValue buffer has been deleted by a previously
|
||||
* seen delete.
|
||||
* @param kv
|
||||
* @param ds
|
||||
* @return True if the specified KeyValue is deleted, false if not
|
||||
*/
|
||||
public boolean isDeleted(final Cell kv, final NavigableSet<Cell> ds) {
|
||||
if (deletes == null || deletes.isEmpty()) return false;
|
||||
for (Cell d: ds) {
|
||||
long kvts = kv.getTimestamp();
|
||||
long dts = d.getTimestamp();
|
||||
if (CellUtil.isDeleteFamily(d)) {
|
||||
if (kvts <= dts) return true;
|
||||
continue;
|
||||
}
|
||||
// Check column
|
||||
int ret = CellComparator.compareQualifiers(kv, d);
|
||||
if (ret <= -1) {
|
||||
// This delete is for an earlier column.
|
||||
continue;
|
||||
} else if (ret >= 1) {
|
||||
// Beyond this kv.
|
||||
break;
|
||||
}
|
||||
// Check Timestamp
|
||||
if (kvts > dts) return false;
|
||||
|
||||
// Check Type
|
||||
switch (KeyValue.Type.codeToType(d.getTypeByte())) {
|
||||
case Delete: return kvts == dts;
|
||||
case DeleteColumn: return true;
|
||||
default: continue;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param cell
|
||||
* @return true if the cell is expired
|
||||
*/
|
||||
public boolean isExpired(final Cell cell) {
|
||||
return cell.getTimestamp() < this.oldestUnexpiredTs ||
|
||||
HStore.isCellTTLExpired(cell, this.oldestUnexpiredTs, this.now);
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle keys whose values hold deletes.
|
||||
* Add to the set of deletes and then if the candidate keys contain any that
|
||||
* might match, then check for a match and remove it. Implies candidates
|
||||
* is made with a Comparator that ignores key type.
|
||||
* @param kv
|
||||
* @return True if we removed <code>k</code> from <code>candidates</code>.
|
||||
*/
|
||||
boolean handleDeletes(final Cell kv) {
|
||||
addDelete(kv);
|
||||
boolean deleted = false;
|
||||
if (!hasCandidate()) return deleted;
|
||||
if (isDeleted(this.candidate)) {
|
||||
this.candidate = null;
|
||||
deleted = true;
|
||||
}
|
||||
return deleted;
|
||||
}
|
||||
|
||||
/**
|
||||
* Do right thing with passed key, add to deletes or add to candidates.
|
||||
* @param kv
|
||||
* @return True if we added a candidate
|
||||
*/
|
||||
boolean handle(final Cell kv) {
|
||||
if (CellUtil.isDelete(kv)) {
|
||||
handleDeletes(kv);
|
||||
return false;
|
||||
}
|
||||
return addCandidate(kv);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return True if has candidate
|
||||
*/
|
||||
public boolean hasCandidate() {
|
||||
return this.candidate != null;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return Best candidate or null.
|
||||
*/
|
||||
public Cell getCandidate() {
|
||||
return this.candidate;
|
||||
}
|
||||
|
||||
public KeyValue getTargetKey() {
|
||||
return this.targetkey;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param kv Current kv
|
||||
* @param firstOnRow on row kv.
|
||||
* @return True if we went too far, past the target key.
|
||||
*/
|
||||
boolean isTooFar(final Cell kv, final Cell firstOnRow) {
|
||||
return this.cellComparator.compareRows(kv, firstOnRow) > 0;
|
||||
}
|
||||
|
||||
boolean isTargetTable(final Cell kv) {
|
||||
if (!metaregion) return true;
|
||||
// Compare start of keys row. Compare including delimiter. Saves having
|
||||
// to calculate where tablename ends in the candidate kv.
|
||||
return Bytes.compareTo(this.targetkey.getRowArray(), this.rowoffset,
|
||||
this.tablenamePlusDelimiterLength,
|
||||
kv.getRowArray(), kv.getRowOffset(), this.tablenamePlusDelimiterLength) == 0;
|
||||
}
|
||||
}
|
|
@ -2430,38 +2430,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
|
|||
// get() methods for client use.
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
@Override
|
||||
public Result getClosestRowBefore(final byte [] row, final byte [] family) throws IOException {
|
||||
if (coprocessorHost != null) {
|
||||
Result result = new Result();
|
||||
if (coprocessorHost.preGetClosestRowBefore(row, family, result)) {
|
||||
return result;
|
||||
}
|
||||
}
|
||||
// look across all the HStores for this region and determine what the
|
||||
// closest key is across all column families, since the data may be sparse
|
||||
checkRow(row, "getClosestRowBefore");
|
||||
startRegionOperation(Operation.GET);
|
||||
this.readRequestsCount.increment();
|
||||
try {
|
||||
Store store = getStore(family);
|
||||
// get the closest key. (HStore.getRowKeyAtOrBefore can return null)
|
||||
Cell key = store.getRowKeyAtOrBefore(row);
|
||||
Result result = null;
|
||||
if (key != null) {
|
||||
Get get = new Get(CellUtil.cloneRow(key));
|
||||
get.addFamily(family);
|
||||
result = get(get);
|
||||
}
|
||||
if (coprocessorHost != null) {
|
||||
coprocessorHost.postGetClosestRowBefore(row, family, result);
|
||||
}
|
||||
return result;
|
||||
} finally {
|
||||
closeRegionOperation(Operation.GET);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public RegionScanner getScanner(Scan scan) throws IOException {
|
||||
return getScanner(scan, null);
|
||||
|
|
|
@ -1774,154 +1774,6 @@ public class HStore implements Store {
|
|||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Cell getRowKeyAtOrBefore(final byte[] row) throws IOException {
|
||||
// If minVersions is set, we will not ignore expired KVs.
|
||||
// As we're only looking for the latest matches, that should be OK.
|
||||
// With minVersions > 0 we guarantee that any KV that has any version
|
||||
// at all (expired or not) has at least one version that will not expire.
|
||||
// Note that this method used to take a KeyValue as arguments. KeyValue
|
||||
// can be back-dated, a row key cannot.
|
||||
long ttlToUse = scanInfo.getMinVersions() > 0 ? Long.MAX_VALUE : this.scanInfo.getTtl();
|
||||
|
||||
KeyValue kv = new KeyValue(row, HConstants.LATEST_TIMESTAMP);
|
||||
|
||||
GetClosestRowBeforeTracker state = new GetClosestRowBeforeTracker(
|
||||
this.comparator, kv, ttlToUse, this.getRegionInfo().isMetaRegion());
|
||||
this.lock.readLock().lock();
|
||||
try {
|
||||
// First go to the memstore. Pick up deletes and candidates.
|
||||
this.memstore.getRowKeyAtOrBefore(state);
|
||||
// Check if match, if we got a candidate on the asked for 'kv' row.
|
||||
// Process each relevant store file. Run through from newest to oldest.
|
||||
Iterator<StoreFile> sfIterator = this.storeEngine.getStoreFileManager()
|
||||
.getCandidateFilesForRowKeyBefore(state.getTargetKey());
|
||||
while (sfIterator.hasNext()) {
|
||||
StoreFile sf = sfIterator.next();
|
||||
sfIterator.remove(); // Remove sf from iterator.
|
||||
boolean haveNewCandidate = rowAtOrBeforeFromStoreFile(sf, state);
|
||||
Cell candidate = state.getCandidate();
|
||||
// we have an optimization here which stops the search if we find exact match.
|
||||
if (candidate != null && CellUtil.matchingRow(candidate, row)) {
|
||||
return candidate;
|
||||
}
|
||||
if (haveNewCandidate) {
|
||||
sfIterator = this.storeEngine.getStoreFileManager().updateCandidateFilesForRowKeyBefore(
|
||||
sfIterator, state.getTargetKey(), candidate);
|
||||
}
|
||||
}
|
||||
return state.getCandidate();
|
||||
} finally {
|
||||
this.lock.readLock().unlock();
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Check an individual MapFile for the row at or before a given row.
|
||||
* @param f
|
||||
* @param state
|
||||
* @throws IOException
|
||||
* @return True iff the candidate has been updated in the state.
|
||||
*/
|
||||
private boolean rowAtOrBeforeFromStoreFile(final StoreFile f,
|
||||
final GetClosestRowBeforeTracker state)
|
||||
throws IOException {
|
||||
StoreFile.Reader r = f.getReader();
|
||||
if (r == null) {
|
||||
LOG.warn("StoreFile " + f + " has a null Reader");
|
||||
return false;
|
||||
}
|
||||
if (r.getEntries() == 0) {
|
||||
LOG.warn("StoreFile " + f + " is a empty store file");
|
||||
return false;
|
||||
}
|
||||
// TODO: Cache these keys rather than make each time?
|
||||
Cell firstKV = r.getFirstKey();
|
||||
if (firstKV == null) return false;
|
||||
Cell lastKV = r.getLastKey();
|
||||
Cell firstOnRow = state.getTargetKey();
|
||||
if (this.comparator.compareRows(lastKV, firstOnRow) < 0) {
|
||||
// If last key in file is not of the target table, no candidates in this
|
||||
// file. Return.
|
||||
if (!state.isTargetTable(lastKV)) return false;
|
||||
// If the row we're looking for is past the end of file, set search key to
|
||||
// last key. TODO: Cache last and first key rather than make each time.
|
||||
firstOnRow = CellUtil.createFirstOnRow(lastKV);
|
||||
}
|
||||
// Get a scanner that caches blocks and that uses pread.
|
||||
HFileScanner scanner = r.getScanner(true, true, false);
|
||||
// Seek scanner. If can't seek it, return.
|
||||
if (!seekToScanner(scanner, firstOnRow, firstKV)) return false;
|
||||
// If we found candidate on firstOnRow, just return. THIS WILL NEVER HAPPEN!
|
||||
// Unlikely that there'll be an instance of actual first row in table.
|
||||
if (walkForwardInSingleRow(scanner, firstOnRow, state)) return true;
|
||||
// If here, need to start backing up.
|
||||
while (scanner.seekBefore(firstOnRow)) {
|
||||
Cell kv = scanner.getCell();
|
||||
if (!state.isTargetTable(kv)) break;
|
||||
if (!state.isBetterCandidate(kv)) break;
|
||||
// Make new first on row.
|
||||
firstOnRow = CellUtil.createFirstOnRow(kv);
|
||||
// Seek scanner. If can't seek it, break.
|
||||
if (!seekToScanner(scanner, firstOnRow, firstKV)) return false;
|
||||
// If we find something, break;
|
||||
if (walkForwardInSingleRow(scanner, firstOnRow, state)) return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Seek the file scanner to firstOnRow or first entry in file.
|
||||
* @param scanner
|
||||
* @param firstOnRow
|
||||
* @param firstKV
|
||||
* @return True if we successfully seeked scanner.
|
||||
* @throws IOException
|
||||
*/
|
||||
private boolean seekToScanner(final HFileScanner scanner,
|
||||
final Cell firstOnRow,
|
||||
final Cell firstKV)
|
||||
throws IOException {
|
||||
Cell kv = firstOnRow;
|
||||
// If firstOnRow < firstKV, set to firstKV
|
||||
if (this.comparator.compareRows(firstKV, firstOnRow) == 0) kv = firstKV;
|
||||
int result = scanner.seekTo(kv);
|
||||
return result != -1;
|
||||
}
|
||||
|
||||
/*
|
||||
* When we come in here, we are probably at the kv just before we break into
|
||||
* the row that firstOnRow is on. Usually need to increment one time to get
|
||||
* on to the row we are interested in.
|
||||
* @param scanner
|
||||
* @param firstOnRow
|
||||
* @param state
|
||||
* @return True we found a candidate.
|
||||
* @throws IOException
|
||||
*/
|
||||
private boolean walkForwardInSingleRow(final HFileScanner scanner,
|
||||
final Cell firstOnRow,
|
||||
final GetClosestRowBeforeTracker state)
|
||||
throws IOException {
|
||||
boolean foundCandidate = false;
|
||||
do {
|
||||
Cell kv = scanner.getCell();
|
||||
// If we are not in the row, skip.
|
||||
if (this.comparator.compareRows(kv, firstOnRow) < 0) continue;
|
||||
// Did we go beyond the target row? If so break.
|
||||
if (state.isTooFar(kv, firstOnRow)) break;
|
||||
if (state.isExpired(kv)) {
|
||||
continue;
|
||||
}
|
||||
// If we added something, this row is a contender. break.
|
||||
if (state.handle(kv)) {
|
||||
foundCandidate = true;
|
||||
break;
|
||||
}
|
||||
} while(scanner.next());
|
||||
return foundCandidate;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean canSplit() {
|
||||
this.lock.readLock().lock();
|
||||
|
|
|
@ -92,13 +92,6 @@ public interface MemStore extends HeapSize {
|
|||
*/
|
||||
long delete(final Cell deleteCell);
|
||||
|
||||
/**
|
||||
* Find the key that matches <i>row</i> exactly, or the one that immediately precedes it. The
|
||||
* target row key is set in state.
|
||||
* @param state column/delete tracking state
|
||||
*/
|
||||
void getRowKeyAtOrBefore(final GetClosestRowBeforeTracker state);
|
||||
|
||||
/**
|
||||
* Given the specs of a column, update it, first by inserting a new record,
|
||||
* then removing the old one. Since there is only 1 KeyValue involved, the memstoreTS
|
||||
|
|
|
@ -1933,32 +1933,21 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
|
||||
quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.GET);
|
||||
|
||||
if (get.hasClosestRowBefore() && get.getClosestRowBefore()) {
|
||||
if (get.getColumnCount() != 1) {
|
||||
throw new DoNotRetryIOException(
|
||||
"get ClosestRowBefore supports one and only one family now, not "
|
||||
+ get.getColumnCount() + " families");
|
||||
}
|
||||
byte[] row = get.getRow().toByteArray();
|
||||
byte[] family = get.getColumn(0).getFamily().toByteArray();
|
||||
r = region.getClosestRowBefore(row, family);
|
||||
} else {
|
||||
Get clientGet = ProtobufUtil.toGet(get);
|
||||
if (get.getExistenceOnly() && region.getCoprocessorHost() != null) {
|
||||
existence = region.getCoprocessorHost().preExists(clientGet);
|
||||
}
|
||||
if (existence == null) {
|
||||
r = region.get(clientGet);
|
||||
if (get.getExistenceOnly()) {
|
||||
boolean exists = r.getExists();
|
||||
if (region.getCoprocessorHost() != null) {
|
||||
exists = region.getCoprocessorHost().postExists(clientGet, exists);
|
||||
}
|
||||
existence = exists;
|
||||
Get clientGet = ProtobufUtil.toGet(get);
|
||||
if (get.getExistenceOnly() && region.getCoprocessorHost() != null) {
|
||||
existence = region.getCoprocessorHost().preExists(clientGet);
|
||||
}
|
||||
if (existence == null) {
|
||||
r = region.get(clientGet);
|
||||
if (get.getExistenceOnly()) {
|
||||
boolean exists = r.getExists();
|
||||
if (region.getCoprocessorHost() != null) {
|
||||
exists = region.getCoprocessorHost().postExists(clientGet, exists);
|
||||
}
|
||||
existence = exists;
|
||||
}
|
||||
}
|
||||
if (existence != null){
|
||||
if (existence != null) {
|
||||
ClientProtos.Result pbr =
|
||||
ProtobufUtil.toResult(existence, region.getRegionInfo().getReplicaId() != 0);
|
||||
builder.setResult(pbr);
|
||||
|
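Note: with the closestRowBefore branch removed from RSRpcServices#get above, a client that previously set Get#setClosestRowBefore obtains the same answer from a reversed small scan. A minimal client-side sketch of that replacement, assuming an already-open Connection and purely illustrative table/row/family arguments (it mirrors the reverse-scan helpers this commit adds to HBaseTestingUtility, TestFromClientSide and ThriftServerRunner below):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class ClosestRowBeforeExample {
  /** Returns the row matching 'row' exactly, or the closest row before it, or null if none. */
  public static Result closestRowBefore(Connection conn, TableName tableName, byte[] row,
      byte[] family) throws IOException {
    Scan scan = new Scan(row);   // start the scan at the probe row
    scan.setSmall(true);         // single-RPC small scan
    scan.setReversed(true);      // walk backwards from the probe row
    scan.setCaching(1);          // only the first result is needed
    scan.addFamily(family);
    try (Table table = conn.getTable(tableName);
         ResultScanner scanner = table.getScanner(scan)) {
      return scanner.next();     // null when no row is at or before 'row'
    }
  }
}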
@ -1974,8 +1963,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
throw new ServiceException(ie);
|
||||
} finally {
|
||||
if (regionServer.metricsRegionServer != null) {
|
||||
regionServer.metricsRegionServer.updateGet(
|
||||
EnvironmentEdgeManager.currentTime() - before);
|
||||
regionServer.metricsRegionServer.updateGet(EnvironmentEdgeManager.currentTime() - before);
|
||||
}
|
||||
if (quota != null) {
|
||||
quota.close();
|
||||
|
|
|
@ -380,17 +380,6 @@ public interface Region extends ConfigurationObserver {
|
|||
*/
|
||||
List<Cell> get(Get get, boolean withCoprocessor) throws IOException;
|
||||
|
||||
/**
|
||||
* Return all the data for the row that matches <i>row</i> exactly,
|
||||
* or the one that immediately precedes it, at or immediately before
|
||||
* <i>ts</i>.
|
||||
* @param row
|
||||
* @param family
|
||||
* @return result of the operation
|
||||
* @throws IOException
|
||||
*/
|
||||
Result getClosestRowBefore(byte[] row, byte[] family) throws IOException;
|
||||
|
||||
/**
|
||||
* Return an iterator that scans over the HRegion, returning the indicated
|
||||
* columns and rows specified by the {@link Scan}.
|
||||
|
|
|
@ -782,41 +782,6 @@ public class RegionCoprocessorHost
|
|||
|
||||
// RegionObserver support
|
||||
|
||||
/**
|
||||
* @param row the row key
|
||||
* @param family the family
|
||||
* @param result the result set from the region
|
||||
* @return true if default processing should be bypassed
|
||||
* @exception IOException Exception
|
||||
*/
|
||||
public boolean preGetClosestRowBefore(final byte[] row, final byte[] family,
|
||||
final Result result) throws IOException {
|
||||
return execOperation(coprocessors.isEmpty() ? null : new RegionOperation() {
|
||||
@Override
|
||||
public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
|
||||
throws IOException {
|
||||
oserver.preGetClosestRowBefore(ctx, row, family, result);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* @param row the row key
|
||||
* @param family the family
|
||||
* @param result the result set from the region
|
||||
* @exception IOException Exception
|
||||
*/
|
||||
public void postGetClosestRowBefore(final byte[] row, final byte[] family,
|
||||
final Result result) throws IOException {
|
||||
execOperation(coprocessors.isEmpty() ? null : new RegionOperation() {
|
||||
@Override
|
||||
public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
|
||||
throws IOException {
|
||||
oserver.postGetClosestRowBefore(ctx, row, family, result);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* @param get the Get request
|
||||
* @return true if default processing should be bypassed
|
||||
|
|
|
@ -143,20 +143,6 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
|
|||
*/
|
||||
void rollback(final Cell cell);
|
||||
|
||||
/**
|
||||
* Find the key that matches <i>row</i> exactly, or the one that immediately precedes it. WARNING:
|
||||
* Only use this method on a table where writes occur with strictly increasing timestamps. This
|
||||
* method assumes this pattern of writes in order to make it reasonably performant. Also our
|
||||
* search is dependent on the axiom that deletes are for cells that are in the container that
|
||||
* follows whether a memstore snapshot or a storefile, not for the current container: i.e. we'll
|
||||
* see deletes before we come across cells we are to delete. Presumption is that the
|
||||
* memstore#kvset is processed before memstore#snapshot and so on.
|
||||
* @param row The row key of the targeted row.
|
||||
* @return Found Cell or null if none found.
|
||||
* @throws IOException
|
||||
*/
|
||||
Cell getRowKeyAtOrBefore(final byte[] row) throws IOException;
|
||||
|
||||
FileSystem getFileSystem();
|
||||
|
||||
/*
|
||||
|
|
|
@ -661,7 +661,6 @@ public class AccessController extends BaseMasterAndRegionObserver
|
|||
}
|
||||
|
||||
private enum OpType {
|
||||
GET_CLOSEST_ROW_BEFORE("getClosestRowBefore"),
|
||||
GET("get"),
|
||||
EXISTS("exists"),
|
||||
SCAN("scan"),
|
||||
|
@ -1424,28 +1423,6 @@ public class AccessController extends BaseMasterAndRegionObserver
|
|||
return scanner;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
|
||||
final byte [] row, final byte [] family, final Result result)
|
||||
throws IOException {
|
||||
assert family != null;
|
||||
RegionCoprocessorEnvironment env = c.getEnvironment();
|
||||
Map<byte[],? extends Collection<byte[]>> families = makeFamilyMap(family, null);
|
||||
User user = getActiveUser();
|
||||
AuthResult authResult = permissionGranted(OpType.GET_CLOSEST_ROW_BEFORE, user, env, families,
|
||||
Action.READ);
|
||||
if (!authResult.isAllowed() && cellFeaturesEnabled && !compatibleEarlyTermination) {
|
||||
authResult.setAllowed(checkCoveringPermission(OpType.GET_CLOSEST_ROW_BEFORE, env, row,
|
||||
families, HConstants.LATEST_TIMESTAMP, Action.READ));
|
||||
authResult.setReason("Covering cell set");
|
||||
}
|
||||
logResult(authResult);
|
||||
if (authorizationEnabled && !authResult.isAllowed()) {
|
||||
throw new AccessDeniedException("Insufficient permissions " +
|
||||
authResult.toContextString());
|
||||
}
|
||||
}
|
||||
|
||||
private void internalPreRead(final ObserverContext<RegionCoprocessorEnvironment> c,
|
||||
final Query query, OpType opType) throws IOException {
|
||||
Filter filter = query.getFilter();
|
||||
|
|
|
@ -92,6 +92,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
|
|||
import org.apache.hadoop.hbase.regionserver.HStore;
|
||||
import org.apache.hadoop.hbase.regionserver.InternalScanner;
|
||||
import org.apache.hadoop.hbase.regionserver.Region;
|
||||
import org.apache.hadoop.hbase.regionserver.RegionScanner;
|
||||
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
|
||||
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
|
||||
import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
|
||||
|
@ -4155,4 +4156,28 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
|
|||
}
|
||||
return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
|
||||
}
|
||||
|
||||
public Result getClosestRowBefore(Region r, byte[] row, byte[] family) throws IOException {
|
||||
Scan scan = new Scan(row);
|
||||
scan.setSmall(true);
|
||||
scan.setCaching(1);
|
||||
scan.setReversed(true);
|
||||
scan.addFamily(family);
|
||||
try (RegionScanner scanner = r.getScanner(scan)) {
|
||||
List<Cell> cells = new ArrayList<Cell>(1);
|
||||
scanner.next(cells);
|
||||
if (r.getRegionInfo().isMetaRegion() && !isTargetTable(row, cells.get(0))) {
|
||||
return null;
|
||||
}
|
||||
return Result.create(cells);
|
||||
}
|
||||
}
|
||||
|
||||
private boolean isTargetTable(final byte[] inRow, Cell c) {
|
||||
String inputRowString = Bytes.toString(inRow);
|
||||
int i = inputRowString.indexOf(HConstants.DELIMITER);
|
||||
String outputRowString = Bytes.toString(c.getRowArray(), c.getRowOffset(), c.getRowLength());
|
||||
int o = outputRowString.indexOf(HConstants.DELIMITER);
|
||||
return inputRowString.substring(0, i).equals(outputRowString.substring(0, o));
|
||||
}
|
||||
}
|
||||
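The helper above is the test-side replacement for the removed Region#getClosestRowBefore: a reversed, small, caching-1 scan whose first result is the row at or before the probe row, plus a meta-region check that preserves the old same-table guarantee. The converted tests below call it as, for example (region, row and family being whatever each test was already probing):

Result r = UTIL.getClosestRowBefore(region, row, family);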
|
|
|
@ -4209,83 +4209,56 @@ public class TestFromClientSide {
|
|||
region.flush(true);
|
||||
|
||||
Result result;
|
||||
Get get = null;
|
||||
|
||||
// Test before first that null is returned
|
||||
get = new Get(beforeFirstRow);
|
||||
get.setClosestRowBefore(true);
|
||||
get.addFamily(HConstants.CATALOG_FAMILY);
|
||||
result = table.get(get);
|
||||
assertTrue(result.isEmpty());
|
||||
result = getReverseScanResult(table, beforeFirstRow,
|
||||
HConstants.CATALOG_FAMILY);
|
||||
assertNull(result);
|
||||
|
||||
// Test at first that first is returned
|
||||
get = new Get(firstRow);
|
||||
get.setClosestRowBefore(true);
|
||||
get.addFamily(HConstants.CATALOG_FAMILY);
|
||||
result = table.get(get);
|
||||
result = getReverseScanResult(table, firstRow, HConstants.CATALOG_FAMILY);
|
||||
assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
|
||||
assertTrue(Bytes.equals(result.getRow(), firstRow));
|
||||
assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one));
|
||||
|
||||
// Test in between first and second that first is returned
|
||||
get = new Get(beforeSecondRow);
|
||||
get.setClosestRowBefore(true);
|
||||
get.addFamily(HConstants.CATALOG_FAMILY);
|
||||
result = table.get(get);
|
||||
result = getReverseScanResult(table, beforeSecondRow, HConstants.CATALOG_FAMILY);
|
||||
assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
|
||||
assertTrue(Bytes.equals(result.getRow(), firstRow));
|
||||
assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one));
|
||||
|
||||
// Test at second make sure second is returned
|
||||
get = new Get(secondRow);
|
||||
get.setClosestRowBefore(true);
|
||||
get.addFamily(HConstants.CATALOG_FAMILY);
|
||||
result = table.get(get);
|
||||
result = getReverseScanResult(table, secondRow, HConstants.CATALOG_FAMILY);
|
||||
assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
|
||||
assertTrue(Bytes.equals(result.getRow(), secondRow));
|
||||
assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), two));
|
||||
|
||||
// Test in second and third, make sure second is returned
|
||||
get = new Get(beforeThirdRow);
|
||||
get.setClosestRowBefore(true);
|
||||
get.addFamily(HConstants.CATALOG_FAMILY);
|
||||
result = table.get(get);
|
||||
result = getReverseScanResult(table, beforeThirdRow, HConstants.CATALOG_FAMILY);
|
||||
assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
|
||||
assertTrue(Bytes.equals(result.getRow(), secondRow));
|
||||
assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), two));
|
||||
|
||||
// Test at third make sure third is returned
|
||||
get = new Get(thirdRow);
|
||||
get.setClosestRowBefore(true);
|
||||
get.addFamily(HConstants.CATALOG_FAMILY);
|
||||
result = table.get(get);
|
||||
result = getReverseScanResult(table, thirdRow, HConstants.CATALOG_FAMILY);
|
||||
assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
|
||||
assertTrue(Bytes.equals(result.getRow(), thirdRow));
|
||||
assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), three));
|
||||
|
||||
// Test in third and forth, make sure third is returned
|
||||
get = new Get(beforeForthRow);
|
||||
get.setClosestRowBefore(true);
|
||||
get.addFamily(HConstants.CATALOG_FAMILY);
|
||||
result = table.get(get);
|
||||
result = getReverseScanResult(table, beforeForthRow, HConstants.CATALOG_FAMILY);
|
||||
assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
|
||||
assertTrue(Bytes.equals(result.getRow(), thirdRow));
|
||||
assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), three));
|
||||
|
||||
// Test at forth make sure forth is returned
|
||||
get = new Get(forthRow);
|
||||
get.setClosestRowBefore(true);
|
||||
get.addFamily(HConstants.CATALOG_FAMILY);
|
||||
result = table.get(get);
|
||||
result = getReverseScanResult(table, forthRow, HConstants.CATALOG_FAMILY);
|
||||
assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
|
||||
assertTrue(Bytes.equals(result.getRow(), forthRow));
|
||||
assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), four));
|
||||
|
||||
// Test after forth make sure forth is returned
|
||||
get = new Get(Bytes.add(forthRow, one));
|
||||
get.setClosestRowBefore(true);
|
||||
get.addFamily(HConstants.CATALOG_FAMILY);
|
||||
result = table.get(get);
|
||||
result = getReverseScanResult(table, Bytes.add(forthRow, one), HConstants.CATALOG_FAMILY);
|
||||
assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
|
||||
assertTrue(Bytes.equals(result.getRow(), forthRow));
|
||||
assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), four));
|
||||
|
@ -4293,6 +4266,17 @@ public class TestFromClientSide {
|
|||
}
|
||||
}
|
||||
|
||||
private Result getReverseScanResult(Table table, byte[] row, byte[] fam) throws IOException {
|
||||
Scan scan = new Scan(row);
|
||||
scan.setSmall(true);
|
||||
scan.setReversed(true);
|
||||
scan.setCaching(1);
|
||||
scan.addFamily(fam);
|
||||
try (ResultScanner scanner = table.getScanner(scan)) {
|
||||
return scanner.next();
|
||||
}
|
||||
}
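One behavioural difference is worth calling out, and it is why the first assertion in this test changed from isEmpty() to assertNull above: a Get with the old closestRowBefore flag answered with an empty Result when no row preceded the probe row, whereas this reverse-scan helper returns null from scanner.next(). A hedged sketch of the guard a migrating caller would add (names are illustrative):

Result r = getReverseScanResult(table, probeRow, family);
if (r == null) {
  // no row exists at or before probeRow
}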
|
||||
|
||||
/**
|
||||
* For HBASE-2156
|
||||
* @throws Exception
|
||||
|
|
|
@ -85,7 +85,6 @@ public class TestFromClientSideNoCodec {
|
|||
// Check getRowOrBefore
|
||||
byte[] f = fs[0];
|
||||
Get get = new Get(row);
|
||||
get.setClosestRowBefore(true);
|
||||
get.addFamily(f);
|
||||
r = ht.get(get);
|
||||
assertTrue(r.toString(), r.containsColumn(f, f));
|
||||
|
|
|
@ -102,8 +102,6 @@ public class SimpleRegionObserver extends BaseRegionObserver {
|
|||
final AtomicInteger ctPreDeleted = new AtomicInteger(0);
|
||||
final AtomicInteger ctPrePrepareDeleteTS = new AtomicInteger(0);
|
||||
final AtomicInteger ctPostDeleted = new AtomicInteger(0);
|
||||
final AtomicInteger ctPreGetClosestRowBefore = new AtomicInteger(0);
|
||||
final AtomicInteger ctPostGetClosestRowBefore = new AtomicInteger(0);
|
||||
final AtomicInteger ctPreIncrement = new AtomicInteger(0);
|
||||
final AtomicInteger ctPreIncrementAfterRowLock = new AtomicInteger(0);
|
||||
final AtomicInteger ctPreAppend = new AtomicInteger(0);
|
||||
|
@ -517,32 +515,6 @@ public class SimpleRegionObserver extends BaseRegionObserver {
|
|||
ctPostBatchMutateIndispensably.incrementAndGet();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
|
||||
final byte[] row, final byte[] family, final Result result)
|
||||
throws IOException {
|
||||
RegionCoprocessorEnvironment e = c.getEnvironment();
|
||||
assertNotNull(e);
|
||||
assertNotNull(e.getRegion());
|
||||
assertNotNull(row);
|
||||
assertNotNull(result);
|
||||
if (ctBeforeDelete.get() > 0) {
|
||||
ctPreGetClosestRowBefore.incrementAndGet();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void postGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
|
||||
final byte[] row, final byte[] family, final Result result)
|
||||
throws IOException {
|
||||
RegionCoprocessorEnvironment e = c.getEnvironment();
|
||||
assertNotNull(e);
|
||||
assertNotNull(e.getRegion());
|
||||
assertNotNull(row);
|
||||
assertNotNull(result);
|
||||
ctPostGetClosestRowBefore.incrementAndGet();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> c,
|
||||
final Increment increment) throws IOException {
|
||||
|
@ -940,14 +912,6 @@ public class SimpleRegionObserver extends BaseRegionObserver {
|
|||
return ctPostDeleted.get();
|
||||
}
|
||||
|
||||
public int getCtPreGetClosestRowBefore() {
|
||||
return ctPreGetClosestRowBefore.get();
|
||||
}
|
||||
|
||||
public int getCtPostGetClosestRowBefore() {
|
||||
return ctPostGetClosestRowBefore.get();
|
||||
}
|
||||
|
||||
public int getCtPreIncrement() {
|
||||
return ctPreIncrement.get();
|
||||
}
|
||||
|
|
|
@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
|
|||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.HRegionInfo;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.KeyValue;
|
||||
import org.apache.hadoop.hbase.MetaTableAccessor;
|
||||
import org.apache.hadoop.hbase.TableName;
|
||||
import org.apache.hadoop.hbase.client.Delete;
|
||||
|
@ -66,6 +65,8 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
|
|||
private static final byte[] T35 = Bytes.toBytes("035");
|
||||
private static final byte[] T40 = Bytes.toBytes("040");
|
||||
|
||||
private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
|
||||
|
||||
|
||||
|
||||
@Test
|
||||
|
@ -160,7 +161,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
|
|||
tableb, tofindBytes,
|
||||
HConstants.NINES, false);
|
||||
LOG.info("find=" + new String(metaKey));
|
||||
Result r = mr.getClosestRowBefore(metaKey, HConstants.CATALOG_FAMILY);
|
||||
Result r = UTIL.getClosestRowBefore(mr, metaKey, HConstants.CATALOG_FAMILY);
|
||||
if (answer == -1) {
|
||||
assertNull(r);
|
||||
return null;
|
||||
|
@ -206,38 +207,38 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
|
|||
p.add(c0, c0, T20);
|
||||
region.put(p);
|
||||
|
||||
Result r = region.getClosestRowBefore(T20, c0);
|
||||
Result r = UTIL.getClosestRowBefore(region, T20, c0);
|
||||
assertTrue(Bytes.equals(T20, r.getRow()));
|
||||
|
||||
Delete d = new Delete(T20);
|
||||
d.deleteColumn(c0, c0);
|
||||
region.delete(d);
|
||||
|
||||
r = region.getClosestRowBefore(T20, c0);
|
||||
r = UTIL.getClosestRowBefore(region, T20, c0);
|
||||
assertTrue(Bytes.equals(T10, r.getRow()));
|
||||
|
||||
p = new Put(T30);
|
||||
p.add(c0, c0, T30);
|
||||
region.put(p);
|
||||
|
||||
r = region.getClosestRowBefore(T30, c0);
|
||||
r = UTIL.getClosestRowBefore(region, T30, c0);
|
||||
assertTrue(Bytes.equals(T30, r.getRow()));
|
||||
|
||||
d = new Delete(T30);
|
||||
d.deleteColumn(c0, c0);
|
||||
region.delete(d);
|
||||
|
||||
r = region.getClosestRowBefore(T30, c0);
|
||||
r = UTIL.getClosestRowBefore(region, T30, c0);
|
||||
assertTrue(Bytes.equals(T10, r.getRow()));
|
||||
r = region.getClosestRowBefore(T31, c0);
|
||||
r = UTIL.getClosestRowBefore(region, T31, c0);
|
||||
assertTrue(Bytes.equals(T10, r.getRow()));
|
||||
|
||||
region.flush(true);
|
||||
|
||||
// try finding "010" after flush
|
||||
r = region.getClosestRowBefore(T30, c0);
|
||||
r = UTIL.getClosestRowBefore(region, T30, c0);
|
||||
assertTrue(Bytes.equals(T10, r.getRow()));
|
||||
r = region.getClosestRowBefore(T31, c0);
|
||||
r = UTIL.getClosestRowBefore(region, T31, c0);
|
||||
assertTrue(Bytes.equals(T10, r.getRow()));
|
||||
|
||||
// Put into a different column family. Should make it so I still get t10
|
||||
|
@ -245,16 +246,16 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
|
|||
p.add(c1, c1, T20);
|
||||
region.put(p);
|
||||
|
||||
r = region.getClosestRowBefore(T30, c0);
|
||||
r = UTIL.getClosestRowBefore(region, T30, c0);
|
||||
assertTrue(Bytes.equals(T10, r.getRow()));
|
||||
r = region.getClosestRowBefore(T31, c0);
|
||||
r = UTIL.getClosestRowBefore(region, T31, c0);
|
||||
assertTrue(Bytes.equals(T10, r.getRow()));
|
||||
|
||||
region.flush(true);
|
||||
|
||||
r = region.getClosestRowBefore(T30, c0);
|
||||
r = UTIL.getClosestRowBefore(region, T30, c0);
|
||||
assertTrue(Bytes.equals(T10, r.getRow()));
|
||||
r = region.getClosestRowBefore(T31, c0);
|
||||
r = UTIL.getClosestRowBefore(region, T31, c0);
|
||||
assertTrue(Bytes.equals(T10, r.getRow()));
|
||||
|
||||
// Now try combo of memcache and mapfiles. Delete the t20 COLUMNS[1]
|
||||
|
@ -262,14 +263,14 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
|
|||
d = new Delete(T20);
|
||||
d.deleteColumn(c1, c1);
|
||||
region.delete(d);
|
||||
r = region.getClosestRowBefore(T30, c0);
|
||||
r = UTIL.getClosestRowBefore(region, T30, c0);
|
||||
assertTrue(Bytes.equals(T10, r.getRow()));
|
||||
|
||||
// Ask for a value off the end of the file. Should return t10.
|
||||
r = region.getClosestRowBefore(T31, c0);
|
||||
r = UTIL.getClosestRowBefore(region, T31, c0);
|
||||
assertTrue(Bytes.equals(T10, r.getRow()));
|
||||
region.flush(true);
|
||||
r = region.getClosestRowBefore(T31, c0);
|
||||
r = UTIL.getClosestRowBefore(region, T31, c0);
|
||||
assertTrue(Bytes.equals(T10, r.getRow()));
|
||||
|
||||
// Ok. Let the candidate come out of hfile but have delete of
|
||||
|
@ -279,7 +280,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
|
|||
region.put(p);
|
||||
d = new Delete(T10);
|
||||
d.deleteColumn(c1, c1);
|
||||
r = region.getClosestRowBefore(T12, c0);
|
||||
r = UTIL.getClosestRowBefore(region, T12, c0);
|
||||
assertTrue(Bytes.equals(T11, r.getRow()));
|
||||
} finally {
|
||||
if (region != null) {
|
||||
|
@ -316,13 +317,13 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
|
|||
region.put(p);
|
||||
|
||||
// try finding "035"
|
||||
Result r = region.getClosestRowBefore(T35, c0);
|
||||
Result r = UTIL.getClosestRowBefore(region, T35, c0);
|
||||
assertTrue(Bytes.equals(T30, r.getRow()));
|
||||
|
||||
region.flush(true);
|
||||
|
||||
// try finding "035"
|
||||
r = region.getClosestRowBefore(T35, c0);
|
||||
r = UTIL.getClosestRowBefore(region, T35, c0);
|
||||
assertTrue(Bytes.equals(T30, r.getRow()));
|
||||
|
||||
p = new Put(T20);
|
||||
|
@ -330,13 +331,13 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
|
|||
region.put(p);
|
||||
|
||||
// try finding "035"
|
||||
r = region.getClosestRowBefore(T35, c0);
|
||||
r = UTIL.getClosestRowBefore(region, T35, c0);
|
||||
assertTrue(Bytes.equals(T30, r.getRow()));
|
||||
|
||||
region.flush(true);
|
||||
|
||||
// try finding "035"
|
||||
r = region.getClosestRowBefore(T35, c0);
|
||||
r = UTIL.getClosestRowBefore(region, T35, c0);
|
||||
assertTrue(Bytes.equals(T30, r.getRow()));
|
||||
} finally {
|
||||
if (region != null) {
|
||||
|
|
|
@ -89,20 +89,20 @@ public class TestMinVersions {
|
|||
// now make sure that getClosestBefore(...) can read
|
||||
// rows that would be expired without minVersion.
|
||||
// also make sure it gets the latest version
|
||||
Result r = region.getClosestRowBefore(T1, c0);
|
||||
Result r = hbu.getClosestRowBefore(region, T1, c0);
|
||||
checkResult(r, c0, T4);
|
||||
|
||||
r = region.getClosestRowBefore(T2, c0);
|
||||
r = hbu.getClosestRowBefore(region, T2, c0);
|
||||
checkResult(r, c0, T4);
|
||||
|
||||
// now flush/compact
|
||||
region.flush(true);
|
||||
region.compact(true);
|
||||
|
||||
r = region.getClosestRowBefore(T1, c0);
|
||||
r = hbu.getClosestRowBefore(region, T1, c0);
|
||||
checkResult(r, c0, T4);
|
||||
|
||||
r = region.getClosestRowBefore(T2, c0);
|
||||
r = hbu.getClosestRowBefore(region, T2, c0);
|
||||
checkResult(r, c0, T4);
|
||||
} finally {
|
||||
HBaseTestingUtility.closeRegionAndWAL(region);
|
||||
|
|
|
@ -879,16 +879,6 @@ public class TestWithDisabledAuthorization extends SecureTestUtil {
|
|||
}
|
||||
}, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE);
|
||||
|
||||
// preGetClosestRowBefore
|
||||
verifyAllowed(new AccessTestAction() {
|
||||
@Override
|
||||
public Object run() throws Exception {
|
||||
ACCESS_CONTROLLER.preGetClosestRowBefore(ObserverContext.createAndPrepare(RCP_ENV, null),
|
||||
TEST_ROW, TEST_FAMILY, new Result());
|
||||
return null;
|
||||
}
|
||||
}, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE);
|
||||
|
||||
// preGetOp
|
||||
verifyAllowed(new AccessTestAction() {
|
||||
@Override
|
||||
|
|
|
@ -1610,25 +1610,12 @@ public class ThriftServerRunner implements Runnable {
|
|||
}
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
@Override
|
||||
public List<TCell> getRowOrBefore(ByteBuffer tableName, ByteBuffer row,
|
||||
ByteBuffer family) throws IOError {
|
||||
try {
|
||||
Result result = getRowOrBefore(getBytes(tableName), getBytes(row), getBytes(family));
|
||||
return ThriftUtilities.cellFromHBase(result.rawCells());
|
||||
} catch (IOException e) {
|
||||
LOG.warn(e.getMessage(), e);
|
||||
throw new IOError(Throwables.getStackTraceAsString(e));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public TRegionInfo getRegionInfo(ByteBuffer searchRow) throws IOError {
|
||||
try {
|
||||
byte[] row = getBytes(searchRow);
|
||||
Result startRowResult =
|
||||
getRowOrBefore(TableName.META_TABLE_NAME.getName(), row, HConstants.CATALOG_FAMILY);
|
||||
Result startRowResult = getReverseScanResult(TableName.META_TABLE_NAME.getName(), row,
|
||||
HConstants.CATALOG_FAMILY);
|
||||
|
||||
if (startRowResult == null) {
|
||||
throw new IOException("Cannot find row in "+ TableName.META_TABLE_NAME+", row="
|
||||
|
@ -1662,7 +1649,8 @@ public class ThriftServerRunner implements Runnable {
|
|||
}
|
||||
}
|
||||
|
||||
private Result getRowOrBefore(byte[] tableName, byte[] row, byte[] family) throws IOException {
|
||||
private Result getReverseScanResult(byte[] tableName, byte[] row, byte[] family)
|
||||
throws IOException {
|
||||
Scan scan = new Scan(row);
|
||||
scan.setReversed(true);
|
||||
scan.addFamily(family);
|
||||
|
|
File diff suppressed because it is too large
|
@ -906,22 +906,6 @@ service Hbase {
|
|||
1:ScannerID id
|
||||
) throws (1:IOError io, 2:IllegalArgument ia)
|
||||
|
||||
/**
|
||||
* Get the row just before the specified one.
|
||||
*
|
||||
* @return value for specified row/column
|
||||
*/
|
||||
list<TCell> getRowOrBefore(
|
||||
/** name of table */
|
||||
1:Text tableName,
|
||||
|
||||
/** row key */
|
||||
2:Text row,
|
||||
|
||||
/** column name */
|
||||
3:Text family
|
||||
) throws (1:IOError io)
|
||||
|
||||
/**
|
||||
* Get the region info for the specified row. It scans
|
||||
* the meta table to find the region's start and end keys.
|
||||
|
|