HBASE-13954 Remove HTableInterface#getRowOrBefore related server side code. (Ashish)

parent cceee1b0c3
commit 3b6db26863
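The deprecation notes and the removed helpers below all point at reversed scans as the replacement for the getRowOrBefore / closest_row_before path. A minimal client-side sketch of that pattern follows; the RowOrBeforeExample class name is invented for illustration, while the scan settings mirror the Scan#createGetClosestRowOrBeforeReverseScan and HTableWrapper#getRowOrBefore code that this commit deletes further down.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

/** Illustrative sketch only: emulate the removed getRowOrBefore with a reversed small scan. */
final class RowOrBeforeExample {
  /**
   * Returns the requested row, or the closest row sorting before it, or null if none exists.
   * Like the removed Scan#createGetClosestRowOrBeforeReverseScan helper, it deliberately does
   * not restrict the scan to a single column family (see the comment in the removed code).
   */
  static Result rowOrBefore(Table table, byte[] row) throws IOException {
    Scan scan = new Scan(row);   // start (inclusive) at the requested row
    scan.setSmall(true);         // small scan: single round trip where possible
    scan.setReversed(true);      // walk backwards toward smaller row keys
    scan.setCaching(1);          // only one row is needed
    try (ResultScanner scanner = table.getScanner(scan)) {
      return scanner.next();
    }
  }
}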
@@ -110,7 +110,6 @@ public class Get extends Query
 this.storeOffset = get.getRowOffsetPerColumnFamily();
 this.tr = get.getTimeRange();
 this.checkExistenceOnly = get.isCheckExistenceOnly();
-this.closestRowBefore = get.isClosestRowBefore();
 Map<byte[], NavigableSet<byte[]>> fams = get.getFamilyMap();
 for (Map.Entry<byte[],NavigableSet<byte[]>> entry : fams.entrySet()) {
 byte [] fam = entry.getKey();
@@ -137,12 +136,23 @@ public class Get extends Query
 return this;
 }

+/**
+* This will always return the default value which is false as client cannot set the value to this
+* property any more.
+* @deprecated since 2.0.0 and will be removed in 3.0.0
+*/
+@Deprecated
 public boolean isClosestRowBefore() {
 return closestRowBefore;
 }

+/**
+* This is not used any more and does nothing. Use reverse scan instead.
+* @deprecated since 2.0.0 and will be removed in 3.0.0
+*/
+@Deprecated
 public Get setClosestRowBefore(boolean closestRowBefore) {
-this.closestRowBefore = closestRowBefore;
+// do Nothing
 return this;
 }

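With this patch the closest-row-before switch is inert on the client: the deprecated setter silently does nothing and the getter always reports the default. A tiny hypothetical check (not part of the patch) makes that concrete.

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.util.Bytes;

public class ClosestRowBeforeIsNoOp {             // example class name, not in the codebase
  public static void main(String[] args) {
    Get get = new Get(Bytes.toBytes("row1"));
    get.setClosestRowBefore(true);                // deprecated; ignored after this change
    System.out.println(get.isClosestRowBefore()); // prints false: the flag can no longer be set
  }
}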
@@ -937,24 +937,6 @@ public class Scan extends Query {
 return (Scan) super.setIsolationLevel(level);
 }

-/**
-* Utility that creates a Scan that will do a small scan in reverse from passed row
-* looking for next closest row.
-* @param row
-* @param family
-* @return An instance of Scan primed with passed <code>row</code> and <code>family</code> to
-* scan in reverse for one row only.
-*/
-static Scan createGetClosestRowOrBeforeReverseScan(byte[] row) {
-// Below does not work if you add in family; need to add the family qualifier that is highest
-// possible family qualifier. Do we have such a notion? Would have to be magic.
-Scan scan = new Scan(row);
-scan.setSmall(true);
-scan.setReversed(true);
-scan.setCaching(1);
-return scan;
-}
-
 /**
 * Enable collection of {@link ScanMetrics}. For advanced users.
 * @param enabled Set to true to enable accumulating scan metrics
@@ -97,7 +97,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServic
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue;
@@ -122,12 +121,12 @@ import org.apache.hadoop.hbase.protobuf.generated.RPCProtos;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
+import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
-import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor;
 import org.apache.hadoop.hbase.quotas.QuotaScope;
 import org.apache.hadoop.hbase.quotas.QuotaType;
@@ -489,9 +488,6 @@ public final class ProtobufUtil {
 if (proto.hasExistenceOnly() && proto.getExistenceOnly()){
 get.setCheckExistenceOnly(true);
 }
-if (proto.hasClosestRowBefore() && proto.getClosestRowBefore()){
-get.setClosestRowBefore(true);
-}
 if (proto.hasConsistency()) {
 get.setConsistency(toConsistency(proto.getConsistency()));
 }
@@ -1077,9 +1073,6 @@ public final class ProtobufUtil {
 if (get.isCheckExistenceOnly()){
 builder.setExistenceOnly(true);
 }
-if (get.isClosestRowBefore()){
-builder.setClosestRowBefore(true);
-}
 if (get.getConsistency() != null && get.getConsistency() != Consistency.STRONG) {
 builder.setConsistency(toConsistency(get.getConsistency()));
 }
@@ -1549,33 +1542,6 @@ public final class ProtobufUtil {

 // Start helpers for Client

-/**
-* A helper to get a row of the closet one before using client protocol.
-*
-* @param client
-* @param regionName
-* @param row
-* @param family
-* @return the row or the closestRowBefore if it doesn't exist
-* @throws IOException
-* @deprecated since 0.99 - use reversed scanner instead.
-*/
-@Deprecated
-public static Result getRowOrBefore(final ClientService.BlockingInterface client,
-final byte[] regionName, final byte[] row,
-final byte[] family) throws IOException {
-GetRequest request =
-RequestConverter.buildGetRowOrBeforeRequest(
-regionName, row, family);
-try {
-GetResponse response = client.get(null, request);
-if (!response.hasResult()) return null;
-return toResult(response.getResult());
-} catch (ServiceException se) {
-throw getRemoteException(se);
-}
-}
-
 /**
 * A helper to bulk load a list of HFiles using client protocol.
 *
@@ -64,7 +64,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionReques
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
@@ -123,35 +122,6 @@ public final class RequestConverter {

 // Start utilities for Client

-/**
-* Create a new protocol buffer GetRequest to get a row, all columns in a family.
-* If there is no such row, return the closest row before it.
-*
-* @param regionName the name of the region to get
-* @param row the row to get
-* @param family the column family to get
-* should return the immediate row before
-* @return a protocol buffer GetReuqest
-*/
-public static GetRequest buildGetRowOrBeforeRequest(
-final byte[] regionName, final byte[] row, final byte[] family) {
-GetRequest.Builder builder = GetRequest.newBuilder();
-RegionSpecifier region = buildRegionSpecifier(
-RegionSpecifierType.REGION_NAME, regionName);
-builder.setRegion(region);
-
-Column.Builder columnBuilder = Column.newBuilder();
-columnBuilder.setFamily(ByteStringer.wrap(family));
-ClientProtos.Get.Builder getBuilder =
-ClientProtos.Get.newBuilder();
-getBuilder.setRow(ByteStringer.wrap(row));
-getBuilder.addColumn(columnBuilder.build());
-getBuilder.setClosestRowBefore(true);
-builder.setGet(getBuilder.build());
-return builder.build();
-}
-
-
 /**
 * Create a protocol buffer GetRequest for a client Get
 *
@@ -514,14 +514,6 @@ public class TestClientNoCluster extends Configured implements Tool {
 ClientProtos.Result.Builder resultBuilder = ClientProtos.Result.newBuilder();
 ByteString row = request.getGet().getRow();
 Pair<HRegionInfo, ServerName> p = meta.get(row.toByteArray());
-if (p == null) {
-if (request.getGet().getClosestRowBefore()) {
-byte [] bytes = row.toByteArray();
-SortedMap<byte [], Pair<HRegionInfo, ServerName>> head =
-bytes != null? meta.headMap(bytes): meta;
-p = head == null? null: head.get(head.lastKey());
-}
-}
 if (p != null) {
 resultBuilder.addCell(getRegionInfo(row, p.getFirst()));
 resultBuilder.addCell(getServer(row, p.getSecond()));
@@ -168,7 +168,6 @@ public class TestGet {
 get.setReplicaId(2);
 get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
 get.setCheckExistenceOnly(true);
-get.setClosestRowBefore(true);
 get.setTimeRange(3, 4);
 get.setMaxVersions(11);
 get.setMaxResultsPerColumnFamily(10);
@@ -191,9 +190,7 @@ public class TestGet {

 // from Get class
 assertEquals(get.isCheckExistenceOnly(), copyGet.isCheckExistenceOnly());
-assertEquals(get.isClosestRowBefore(), copyGet.isClosestRowBefore());
 assertTrue(get.getTimeRange().equals(copyGet.getTimeRange()));
-assertEquals(get.isClosestRowBefore(), copyGet.isClosestRowBefore());
 assertEquals(get.getMaxVersions(), copyGet.getMaxVersions());
 assertEquals(get.getMaxResultsPerColumnFamily(), copyGet.getMaxResultsPerColumnFamily());
 assertEquals(get.getRowOffsetPerColumnFamily(), copyGet.getRowOffsetPerColumnFamily());
@@ -1926,26 +1926,6 @@ public final class ClientProtos {
 */
 boolean getExistenceOnly();

-// optional bool closest_row_before = 11 [default = false];
-/**
-* <code>optional bool closest_row_before = 11 [default = false];</code>
-*
-* <pre>
-* If the row to get doesn't exist, return the
-* closest row before.
-* </pre>
-*/
-boolean hasClosestRowBefore();
-/**
-* <code>optional bool closest_row_before = 11 [default = false];</code>
-*
-* <pre>
-* If the row to get doesn't exist, return the
-* closest row before.
-* </pre>
-*/
-boolean getClosestRowBefore();
-
 // optional .hbase.pb.Consistency consistency = 12 [default = STRONG];
 /**
 * <code>optional .hbase.pb.Consistency consistency = 12 [default = STRONG];</code>
@@ -1963,8 +1943,7 @@ public final class ClientProtos {
 **
 * The protocol buffer version of Get.
 * Unless existence_only is specified, return all the requested data
-* for the row that matches exactly, or the one that immediately
-* precedes it if closest_row_before is specified.
+* for the row that matches exactly.
 * </pre>
 */
 public static final class Get extends
@@ -2087,18 +2066,13 @@ public final class ClientProtos {
 existenceOnly_ = input.readBool();
 break;
 }
-case 88: {
-bitField0_ |= 0x00000100;
-closestRowBefore_ = input.readBool();
-break;
-}
 case 96: {
 int rawValue = input.readEnum();
 org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency value = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.valueOf(rawValue);
 if (value == null) {
 unknownFields.mergeVarintField(12, rawValue);
 } else {
-bitField0_ |= 0x00000200;
+bitField0_ |= 0x00000100;
 consistency_ = value;
 }
 break;
@@ -2371,32 +2345,6 @@ public final class ClientProtos {
 return existenceOnly_;
 }

-// optional bool closest_row_before = 11 [default = false];
-public static final int CLOSEST_ROW_BEFORE_FIELD_NUMBER = 11;
-private boolean closestRowBefore_;
-/**
-* <code>optional bool closest_row_before = 11 [default = false];</code>
-*
-* <pre>
-* If the row to get doesn't exist, return the
-* closest row before.
-* </pre>
-*/
-public boolean hasClosestRowBefore() {
-return ((bitField0_ & 0x00000100) == 0x00000100);
-}
-/**
-* <code>optional bool closest_row_before = 11 [default = false];</code>
-*
-* <pre>
-* If the row to get doesn't exist, return the
-* closest row before.
-* </pre>
-*/
-public boolean getClosestRowBefore() {
-return closestRowBefore_;
-}
-
 // optional .hbase.pb.Consistency consistency = 12 [default = STRONG];
 public static final int CONSISTENCY_FIELD_NUMBER = 12;
 private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency consistency_;
@@ -2404,7 +2352,7 @@ public final class ClientProtos {
 * <code>optional .hbase.pb.Consistency consistency = 12 [default = STRONG];</code>
 */
 public boolean hasConsistency() {
-return ((bitField0_ & 0x00000200) == 0x00000200);
+return ((bitField0_ & 0x00000100) == 0x00000100);
 }
 /**
 * <code>optional .hbase.pb.Consistency consistency = 12 [default = STRONG];</code>
@@ -2424,7 +2372,6 @@ public final class ClientProtos {
 storeLimit_ = 0;
 storeOffset_ = 0;
 existenceOnly_ = false;
-closestRowBefore_ = false;
 consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
 }
 private byte memoizedIsInitialized = -1;
@@ -2492,9 +2439,6 @@ public final class ClientProtos {
 output.writeBool(10, existenceOnly_);
 }
 if (((bitField0_ & 0x00000100) == 0x00000100)) {
-output.writeBool(11, closestRowBefore_);
-}
-if (((bitField0_ & 0x00000200) == 0x00000200)) {
 output.writeEnum(12, consistency_.getNumber());
 }
 getUnknownFields().writeTo(output);
@@ -2547,10 +2491,6 @@ public final class ClientProtos {
 .computeBoolSize(10, existenceOnly_);
 }
 if (((bitField0_ & 0x00000100) == 0x00000100)) {
-size += com.google.protobuf.CodedOutputStream
-.computeBoolSize(11, closestRowBefore_);
-}
-if (((bitField0_ & 0x00000200) == 0x00000200)) {
 size += com.google.protobuf.CodedOutputStream
 .computeEnumSize(12, consistency_.getNumber());
 }
@@ -2621,11 +2561,6 @@ public final class ClientProtos {
 result = result && (getExistenceOnly()
 == other.getExistenceOnly());
 }
-result = result && (hasClosestRowBefore() == other.hasClosestRowBefore());
-if (hasClosestRowBefore()) {
-result = result && (getClosestRowBefore()
-== other.getClosestRowBefore());
-}
 result = result && (hasConsistency() == other.hasConsistency());
 if (hasConsistency()) {
 result = result &&
@@ -2684,10 +2619,6 @@ public final class ClientProtos {
 hash = (37 * hash) + EXISTENCE_ONLY_FIELD_NUMBER;
 hash = (53 * hash) + hashBoolean(getExistenceOnly());
 }
-if (hasClosestRowBefore()) {
-hash = (37 * hash) + CLOSEST_ROW_BEFORE_FIELD_NUMBER;
-hash = (53 * hash) + hashBoolean(getClosestRowBefore());
-}
 if (hasConsistency()) {
 hash = (37 * hash) + CONSISTENCY_FIELD_NUMBER;
 hash = (53 * hash) + hashEnum(getConsistency());
@@ -2770,8 +2701,7 @@ public final class ClientProtos {
 **
 * The protocol buffer version of Get.
 * Unless existence_only is specified, return all the requested data
-* for the row that matches exactly, or the one that immediately
-* precedes it if closest_row_before is specified.
+* for the row that matches exactly.
 * </pre>
 */
 public static final class Builder extends
@@ -2849,10 +2779,8 @@ public final class ClientProtos {
 bitField0_ = (bitField0_ & ~0x00000100);
 existenceOnly_ = false;
 bitField0_ = (bitField0_ & ~0x00000200);
-closestRowBefore_ = false;
-bitField0_ = (bitField0_ & ~0x00000400);
 consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
-bitField0_ = (bitField0_ & ~0x00000800);
+bitField0_ = (bitField0_ & ~0x00000400);
 return this;
 }

@@ -2942,10 +2870,6 @@ public final class ClientProtos {
 if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
 to_bitField0_ |= 0x00000100;
 }
-result.closestRowBefore_ = closestRowBefore_;
-if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
-to_bitField0_ |= 0x00000200;
-}
 result.consistency_ = consistency_;
 result.bitField0_ = to_bitField0_;
 onBuilt();
@@ -3039,9 +2963,6 @@ public final class ClientProtos {
 if (other.hasExistenceOnly()) {
 setExistenceOnly(other.getExistenceOnly());
 }
-if (other.hasClosestRowBefore()) {
-setClosestRowBefore(other.getClosestRowBefore());
-}
 if (other.hasConsistency()) {
 setConsistency(other.getConsistency());
 }
@@ -4029,66 +3950,13 @@ public final class ClientProtos {
 return this;
 }

-// optional bool closest_row_before = 11 [default = false];
-private boolean closestRowBefore_ ;
-/**
-* <code>optional bool closest_row_before = 11 [default = false];</code>
-*
-* <pre>
-* If the row to get doesn't exist, return the
-* closest row before.
-* </pre>
-*/
-public boolean hasClosestRowBefore() {
-return ((bitField0_ & 0x00000400) == 0x00000400);
-}
-/**
-* <code>optional bool closest_row_before = 11 [default = false];</code>
-*
-* <pre>
-* If the row to get doesn't exist, return the
-* closest row before.
-* </pre>
-*/
-public boolean getClosestRowBefore() {
-return closestRowBefore_;
-}
-/**
-* <code>optional bool closest_row_before = 11 [default = false];</code>
-*
-* <pre>
-* If the row to get doesn't exist, return the
-* closest row before.
-* </pre>
-*/
-public Builder setClosestRowBefore(boolean value) {
-bitField0_ |= 0x00000400;
-closestRowBefore_ = value;
-onChanged();
-return this;
-}
-/**
-* <code>optional bool closest_row_before = 11 [default = false];</code>
-*
-* <pre>
-* If the row to get doesn't exist, return the
-* closest row before.
-* </pre>
-*/
-public Builder clearClosestRowBefore() {
-bitField0_ = (bitField0_ & ~0x00000400);
-closestRowBefore_ = false;
-onChanged();
-return this;
-}
-
 // optional .hbase.pb.Consistency consistency = 12 [default = STRONG];
 private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
 /**
 * <code>optional .hbase.pb.Consistency consistency = 12 [default = STRONG];</code>
 */
 public boolean hasConsistency() {
-return ((bitField0_ & 0x00000800) == 0x00000800);
+return ((bitField0_ & 0x00000400) == 0x00000400);
 }
 /**
 * <code>optional .hbase.pb.Consistency consistency = 12 [default = STRONG];</code>
@@ -4103,7 +3971,7 @@ public final class ClientProtos {
 if (value == null) {
 throw new NullPointerException();
 }
-bitField0_ |= 0x00000800;
+bitField0_ |= 0x00000400;
 consistency_ = value;
 onChanged();
 return this;
@@ -4112,7 +3980,7 @@ public final class ClientProtos {
 * <code>optional .hbase.pb.Consistency consistency = 12 [default = STRONG];</code>
 */
 public Builder clearConsistency() {
-bitField0_ = (bitField0_ & ~0x00000800);
+bitField0_ = (bitField0_ & ~0x00000400);
 consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
 onChanged();
 return this;
@@ -33253,135 +33121,134 @@ public final class ClientProtos {
|
||||||
"o\032\017MapReduce.proto\"\037\n\016Authorizations\022\r\n\005" +
|
"o\032\017MapReduce.proto\"\037\n\016Authorizations\022\r\n\005" +
|
||||||
"label\030\001 \003(\t\"$\n\016CellVisibility\022\022\n\nexpress" +
|
"label\030\001 \003(\t\"$\n\016CellVisibility\022\022\n\nexpress" +
|
||||||
"ion\030\001 \002(\t\"+\n\006Column\022\016\n\006family\030\001 \002(\014\022\021\n\tq" +
|
"ion\030\001 \002(\t\"+\n\006Column\022\016\n\006family\030\001 \002(\014\022\021\n\tq" +
|
||||||
"ualifier\030\002 \003(\014\"\201\003\n\003Get\022\013\n\003row\030\001 \002(\014\022 \n\006c" +
|
"ualifier\030\002 \003(\014\"\336\002\n\003Get\022\013\n\003row\030\001 \002(\014\022 \n\006c" +
|
||||||
"olumn\030\002 \003(\0132\020.hbase.pb.Column\022*\n\tattribu" +
|
"olumn\030\002 \003(\0132\020.hbase.pb.Column\022*\n\tattribu" +
|
||||||
"te\030\003 \003(\0132\027.hbase.pb.NameBytesPair\022 \n\006fil" +
|
"te\030\003 \003(\0132\027.hbase.pb.NameBytesPair\022 \n\006fil" +
|
||||||
"ter\030\004 \001(\0132\020.hbase.pb.Filter\022\'\n\ntime_rang" +
|
"ter\030\004 \001(\0132\020.hbase.pb.Filter\022\'\n\ntime_rang" +
|
||||||
"e\030\005 \001(\0132\023.hbase.pb.TimeRange\022\027\n\014max_vers",
|
"e\030\005 \001(\0132\023.hbase.pb.TimeRange\022\027\n\014max_vers",
|
||||||
"ions\030\006 \001(\r:\0011\022\032\n\014cache_blocks\030\007 \001(\010:\004tru" +
|
"ions\030\006 \001(\r:\0011\022\032\n\014cache_blocks\030\007 \001(\010:\004tru" +
|
||||||
"e\022\023\n\013store_limit\030\010 \001(\r\022\024\n\014store_offset\030\t" +
|
"e\022\023\n\013store_limit\030\010 \001(\r\022\024\n\014store_offset\030\t" +
|
||||||
" \001(\r\022\035\n\016existence_only\030\n \001(\010:\005false\022!\n\022c" +
|
" \001(\r\022\035\n\016existence_only\030\n \001(\010:\005false\0222\n\013c" +
|
||||||
"losest_row_before\030\013 \001(\010:\005false\0222\n\013consis" +
|
"onsistency\030\014 \001(\0162\025.hbase.pb.Consistency:" +
|
||||||
"tency\030\014 \001(\0162\025.hbase.pb.Consistency:\006STRO" +
|
"\006STRONG\"\203\001\n\006Result\022\034\n\004cell\030\001 \003(\0132\016.hbase" +
|
||||||
"NG\"\203\001\n\006Result\022\034\n\004cell\030\001 \003(\0132\016.hbase.pb.C" +
|
".pb.Cell\022\035\n\025associated_cell_count\030\002 \001(\005\022" +
|
||||||
"ell\022\035\n\025associated_cell_count\030\002 \001(\005\022\016\n\006ex" +
|
"\016\n\006exists\030\003 \001(\010\022\024\n\005stale\030\004 \001(\010:\005false\022\026\n" +
|
||||||
"ists\030\003 \001(\010\022\024\n\005stale\030\004 \001(\010:\005false\022\026\n\007part" +
|
"\007partial\030\005 \001(\010:\005false\"S\n\nGetRequest\022)\n\006r" +
|
||||||
"ial\030\005 \001(\010:\005false\"S\n\nGetRequest\022)\n\006region" +
|
"egion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\032" +
|
||||||
"\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\032\n\003get",
|
"\n\003get\030\002 \002(\0132\r.hbase.pb.Get\"/\n\013GetRespons",
|
||||||
"\030\002 \002(\0132\r.hbase.pb.Get\"/\n\013GetResponse\022 \n\006" +
|
"e\022 \n\006result\030\001 \001(\0132\020.hbase.pb.Result\"\222\001\n\t" +
|
||||||
"result\030\001 \001(\0132\020.hbase.pb.Result\"\222\001\n\tCondi" +
|
"Condition\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002(\014\022\021" +
|
||||||
"tion\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002(\014\022\021\n\tqua" +
|
"\n\tqualifier\030\003 \002(\014\022+\n\014compare_type\030\004 \002(\0162" +
|
||||||
"lifier\030\003 \002(\014\022+\n\014compare_type\030\004 \002(\0162\025.hba" +
|
"\025.hbase.pb.CompareType\022(\n\ncomparator\030\005 \002" +
|
||||||
"se.pb.CompareType\022(\n\ncomparator\030\005 \002(\0132\024." +
|
"(\0132\024.hbase.pb.Comparator\"\364\006\n\rMutationPro" +
|
||||||
"hbase.pb.Comparator\"\364\006\n\rMutationProto\022\013\n" +
|
"to\022\013\n\003row\030\001 \001(\014\0229\n\013mutate_type\030\002 \001(\0162$.h" +
|
||||||
"\003row\030\001 \001(\014\0229\n\013mutate_type\030\002 \001(\0162$.hbase." +
|
"base.pb.MutationProto.MutationType\0229\n\014co" +
|
||||||
"pb.MutationProto.MutationType\0229\n\014column_" +
|
"lumn_value\030\003 \003(\0132#.hbase.pb.MutationProt" +
|
||||||
"value\030\003 \003(\0132#.hbase.pb.MutationProto.Col" +
|
"o.ColumnValue\022\021\n\ttimestamp\030\004 \001(\004\022*\n\tattr" +
|
||||||
"umnValue\022\021\n\ttimestamp\030\004 \001(\004\022*\n\tattribute",
|
"ibute\030\005 \003(\0132\027.hbase.pb.NameBytesPair\022C\n\n",
|
||||||
"\030\005 \003(\0132\027.hbase.pb.NameBytesPair\022C\n\ndurab" +
|
"durability\030\006 \001(\0162\".hbase.pb.MutationProt" +
|
||||||
"ility\030\006 \001(\0162\".hbase.pb.MutationProto.Dur" +
|
"o.Durability:\013USE_DEFAULT\022\'\n\ntime_range\030" +
|
||||||
"ability:\013USE_DEFAULT\022\'\n\ntime_range\030\007 \001(\013" +
|
"\007 \001(\0132\023.hbase.pb.TimeRange\022\035\n\025associated" +
|
||||||
"2\023.hbase.pb.TimeRange\022\035\n\025associated_cell" +
|
"_cell_count\030\010 \001(\005\022\r\n\005nonce\030\t \001(\004\032\371\001\n\013Col" +
|
||||||
"_count\030\010 \001(\005\022\r\n\005nonce\030\t \001(\004\032\371\001\n\013ColumnVa" +
|
"umnValue\022\016\n\006family\030\001 \002(\014\022K\n\017qualifier_va" +
|
||||||
"lue\022\016\n\006family\030\001 \002(\014\022K\n\017qualifier_value\030\002" +
|
"lue\030\002 \003(\01322.hbase.pb.MutationProto.Colum" +
|
||||||
" \003(\01322.hbase.pb.MutationProto.ColumnValu" +
|
"nValue.QualifierValue\032\214\001\n\016QualifierValue" +
|
||||||
"e.QualifierValue\032\214\001\n\016QualifierValue\022\021\n\tq" +
|
"\022\021\n\tqualifier\030\001 \001(\014\022\r\n\005value\030\002 \001(\014\022\021\n\tti" +
|
||||||
"ualifier\030\001 \001(\014\022\r\n\005value\030\002 \001(\014\022\021\n\ttimesta" +
|
"mestamp\030\003 \001(\004\0227\n\013delete_type\030\004 \001(\0162\".hba" +
|
||||||
"mp\030\003 \001(\004\0227\n\013delete_type\030\004 \001(\0162\".hbase.pb",
|
"se.pb.MutationProto.DeleteType\022\014\n\004tags\030\005",
|
||||||
".MutationProto.DeleteType\022\014\n\004tags\030\005 \001(\014\"" +
|
" \001(\014\"W\n\nDurability\022\017\n\013USE_DEFAULT\020\000\022\014\n\010S" +
|
||||||
"W\n\nDurability\022\017\n\013USE_DEFAULT\020\000\022\014\n\010SKIP_W" +
|
"KIP_WAL\020\001\022\r\n\tASYNC_WAL\020\002\022\014\n\010SYNC_WAL\020\003\022\r" +
|
||||||
"AL\020\001\022\r\n\tASYNC_WAL\020\002\022\014\n\010SYNC_WAL\020\003\022\r\n\tFSY" +
|
"\n\tFSYNC_WAL\020\004\">\n\014MutationType\022\n\n\006APPEND\020" +
|
||||||
"NC_WAL\020\004\">\n\014MutationType\022\n\n\006APPEND\020\000\022\r\n\t" +
|
"\000\022\r\n\tINCREMENT\020\001\022\007\n\003PUT\020\002\022\n\n\006DELETE\020\003\"p\n" +
|
||||||
"INCREMENT\020\001\022\007\n\003PUT\020\002\022\n\n\006DELETE\020\003\"p\n\nDele" +
|
"\nDeleteType\022\026\n\022DELETE_ONE_VERSION\020\000\022\034\n\030D" +
|
||||||
"teType\022\026\n\022DELETE_ONE_VERSION\020\000\022\034\n\030DELETE" +
|
"ELETE_MULTIPLE_VERSIONS\020\001\022\021\n\rDELETE_FAMI" +
|
||||||
"_MULTIPLE_VERSIONS\020\001\022\021\n\rDELETE_FAMILY\020\002\022" +
|
"LY\020\002\022\031\n\025DELETE_FAMILY_VERSION\020\003\"\242\001\n\rMuta" +
|
||||||
"\031\n\025DELETE_FAMILY_VERSION\020\003\"\242\001\n\rMutateReq" +
|
"teRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Reg" +
|
||||||
"uest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSp" +
|
"ionSpecifier\022)\n\010mutation\030\002 \002(\0132\027.hbase.p" +
|
||||||
"ecifier\022)\n\010mutation\030\002 \002(\0132\027.hbase.pb.Mut",
|
"b.MutationProto\022&\n\tcondition\030\003 \001(\0132\023.hba",
|
||||||
"ationProto\022&\n\tcondition\030\003 \001(\0132\023.hbase.pb" +
|
"se.pb.Condition\022\023\n\013nonce_group\030\004 \001(\004\"E\n\016" +
|
||||||
".Condition\022\023\n\013nonce_group\030\004 \001(\004\"E\n\016Mutat" +
|
"MutateResponse\022 \n\006result\030\001 \001(\0132\020.hbase.p" +
|
||||||
"eResponse\022 \n\006result\030\001 \001(\0132\020.hbase.pb.Res" +
|
"b.Result\022\021\n\tprocessed\030\002 \001(\010\"\346\003\n\004Scan\022 \n\006" +
|
||||||
"ult\022\021\n\tprocessed\030\002 \001(\010\"\346\003\n\004Scan\022 \n\006colum" +
|
"column\030\001 \003(\0132\020.hbase.pb.Column\022*\n\tattrib" +
|
||||||
"n\030\001 \003(\0132\020.hbase.pb.Column\022*\n\tattribute\030\002" +
|
"ute\030\002 \003(\0132\027.hbase.pb.NameBytesPair\022\021\n\tst" +
|
||||||
" \003(\0132\027.hbase.pb.NameBytesPair\022\021\n\tstart_r" +
|
"art_row\030\003 \001(\014\022\020\n\010stop_row\030\004 \001(\014\022 \n\006filte" +
|
||||||
"ow\030\003 \001(\014\022\020\n\010stop_row\030\004 \001(\014\022 \n\006filter\030\005 \001" +
|
"r\030\005 \001(\0132\020.hbase.pb.Filter\022\'\n\ntime_range\030" +
|
||||||
"(\0132\020.hbase.pb.Filter\022\'\n\ntime_range\030\006 \001(\013" +
|
"\006 \001(\0132\023.hbase.pb.TimeRange\022\027\n\014max_versio" +
|
||||||
"2\023.hbase.pb.TimeRange\022\027\n\014max_versions\030\007 " +
|
"ns\030\007 \001(\r:\0011\022\032\n\014cache_blocks\030\010 \001(\010:\004true\022" +
|
||||||
"\001(\r:\0011\022\032\n\014cache_blocks\030\010 \001(\010:\004true\022\022\n\nba",
|
"\022\n\nbatch_size\030\t \001(\r\022\027\n\017max_result_size\030\n",
|
||||||
"tch_size\030\t \001(\r\022\027\n\017max_result_size\030\n \001(\004\022" +
|
" \001(\004\022\023\n\013store_limit\030\013 \001(\r\022\024\n\014store_offse" +
|
||||||
"\023\n\013store_limit\030\013 \001(\r\022\024\n\014store_offset\030\014 \001" +
|
"t\030\014 \001(\r\022&\n\036load_column_families_on_deman" +
|
||||||
"(\r\022&\n\036load_column_families_on_demand\030\r \001" +
|
"d\030\r \001(\010\022\r\n\005small\030\016 \001(\010\022\027\n\010reversed\030\017 \001(\010" +
|
||||||
"(\010\022\r\n\005small\030\016 \001(\010\022\027\n\010reversed\030\017 \001(\010:\005fal" +
|
":\005false\0222\n\013consistency\030\020 \001(\0162\025.hbase.pb." +
|
||||||
"se\0222\n\013consistency\030\020 \001(\0162\025.hbase.pb.Consi" +
|
"Consistency:\006STRONG\022\017\n\007caching\030\021 \001(\r\"\220\002\n" +
|
||||||
"stency:\006STRONG\022\017\n\007caching\030\021 \001(\r\"\220\002\n\013Scan" +
|
"\013ScanRequest\022)\n\006region\030\001 \001(\0132\031.hbase.pb." +
|
||||||
"Request\022)\n\006region\030\001 \001(\0132\031.hbase.pb.Regio" +
|
"RegionSpecifier\022\034\n\004scan\030\002 \001(\0132\016.hbase.pb" +
|
||||||
"nSpecifier\022\034\n\004scan\030\002 \001(\0132\016.hbase.pb.Scan" +
|
".Scan\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016number_of_r" +
|
||||||
"\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016number_of_rows\030\004" +
|
"ows\030\004 \001(\r\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n\rnext" +
|
||||||
" \001(\r\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n\rnext_call",
|
"_call_seq\030\006 \001(\004\022\037\n\027client_handles_partia",
|
||||||
"_seq\030\006 \001(\004\022\037\n\027client_handles_partials\030\007 " +
|
"ls\030\007 \001(\010\022!\n\031client_handles_heartbeats\030\010 " +
|
||||||
"\001(\010\022!\n\031client_handles_heartbeats\030\010 \001(\010\022\032" +
|
"\001(\010\022\032\n\022track_scan_metrics\030\t \001(\010\"\232\002\n\014Scan" +
|
||||||
"\n\022track_scan_metrics\030\t \001(\010\"\232\002\n\014ScanRespo" +
|
"Response\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n\nsc" +
|
||||||
"nse\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n\nscanner" +
|
"anner_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003" +
|
||||||
"_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003ttl\030\004" +
|
"ttl\030\004 \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase.pb.Re" +
|
||||||
" \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase.pb.Result\022" +
|
"sult\022\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_" +
|
||||||
"\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_resul" +
|
"result\030\007 \003(\010\022\036\n\026more_results_in_region\030\010" +
|
||||||
"t\030\007 \003(\010\022\036\n\026more_results_in_region\030\010 \001(\010\022" +
|
" \001(\010\022\031\n\021heartbeat_message\030\t \001(\010\022+\n\014scan_" +
|
||||||
"\031\n\021heartbeat_message\030\t \001(\010\022+\n\014scan_metri" +
|
"metrics\030\n \001(\0132\025.hbase.pb.ScanMetrics\"\305\001\n" +
|
||||||
"cs\030\n \001(\0132\025.hbase.pb.ScanMetrics\"\305\001\n\024Bulk",
|
"\024BulkLoadHFileRequest\022)\n\006region\030\001 \002(\0132\031.",
|
||||||
"LoadHFileRequest\022)\n\006region\030\001 \002(\0132\031.hbase" +
|
"hbase.pb.RegionSpecifier\022>\n\013family_path\030" +
|
||||||
".pb.RegionSpecifier\022>\n\013family_path\030\002 \003(\013" +
|
"\002 \003(\0132).hbase.pb.BulkLoadHFileRequest.Fa" +
|
||||||
"2).hbase.pb.BulkLoadHFileRequest.FamilyP" +
|
"milyPath\022\026\n\016assign_seq_num\030\003 \001(\010\032*\n\nFami" +
|
||||||
"ath\022\026\n\016assign_seq_num\030\003 \001(\010\032*\n\nFamilyPat" +
|
"lyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025" +
|
||||||
"h\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkL" +
|
"BulkLoadHFileResponse\022\016\n\006loaded\030\001 \002(\010\"a\n" +
|
||||||
"oadHFileResponse\022\016\n\006loaded\030\001 \002(\010\"a\n\026Copr" +
|
"\026CoprocessorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014" +
|
||||||
"ocessorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014servi" +
|
"service_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022" +
|
||||||
"ce_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022\017\n\007re" +
|
"\017\n\007request\030\004 \002(\014\"B\n\030CoprocessorServiceRe" +
|
||||||
"quest\030\004 \002(\014\"B\n\030CoprocessorServiceResult\022" +
|
"sult\022&\n\005value\030\001 \001(\0132\027.hbase.pb.NameBytes" +
|
||||||
"&\n\005value\030\001 \001(\0132\027.hbase.pb.NameBytesPair\"",
|
"Pair\"v\n\031CoprocessorServiceRequest\022)\n\006reg",
|
||||||
"v\n\031CoprocessorServiceRequest\022)\n\006region\030\001" +
|
"ion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022.\n\004" +
|
||||||
" \002(\0132\031.hbase.pb.RegionSpecifier\022.\n\004call\030" +
|
"call\030\002 \002(\0132 .hbase.pb.CoprocessorService" +
|
||||||
"\002 \002(\0132 .hbase.pb.CoprocessorServiceCall\"" +
|
"Call\"o\n\032CoprocessorServiceResponse\022)\n\006re" +
|
||||||
"o\n\032CoprocessorServiceResponse\022)\n\006region\030" +
|
"gion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022&\n" +
|
||||||
"\001 \002(\0132\031.hbase.pb.RegionSpecifier\022&\n\005valu" +
|
"\005value\030\002 \002(\0132\027.hbase.pb.NameBytesPair\"\226\001" +
|
||||||
"e\030\002 \002(\0132\027.hbase.pb.NameBytesPair\"\226\001\n\006Act" +
|
"\n\006Action\022\r\n\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001(" +
|
||||||
"ion\022\r\n\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001(\0132\027.h" +
|
"\0132\027.hbase.pb.MutationProto\022\032\n\003get\030\003 \001(\0132" +
|
||||||
"base.pb.MutationProto\022\032\n\003get\030\003 \001(\0132\r.hba" +
|
"\r.hbase.pb.Get\0226\n\014service_call\030\004 \001(\0132 .h" +
|
||||||
"se.pb.Get\0226\n\014service_call\030\004 \001(\0132 .hbase." +
|
"base.pb.CoprocessorServiceCall\"k\n\014Region" +
|
||||||
"pb.CoprocessorServiceCall\"k\n\014RegionActio",
|
"Action\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Region",
|
||||||
"n\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpeci" +
|
"Specifier\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(" +
|
||||||
"fier\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(\0132\020.h" +
|
"\0132\020.hbase.pb.Action\"D\n\017RegionLoadStats\022\027" +
|
||||||
"base.pb.Action\"D\n\017RegionLoadStats\022\027\n\014mem" +
|
"\n\014memstoreLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy" +
|
||||||
"storeLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy\030\002 \001(" +
|
"\030\002 \001(\005:\0010\"\332\001\n\021ResultOrException\022\r\n\005index" +
|
||||||
"\005:\0010\"\332\001\n\021ResultOrException\022\r\n\005index\030\001 \001(" +
|
"\030\001 \001(\r\022 \n\006result\030\002 \001(\0132\020.hbase.pb.Result" +
|
||||||
"\r\022 \n\006result\030\002 \001(\0132\020.hbase.pb.Result\022*\n\te" +
|
"\022*\n\texception\030\003 \001(\0132\027.hbase.pb.NameBytes" +
|
||||||
"xception\030\003 \001(\0132\027.hbase.pb.NameBytesPair\022" +
|
"Pair\022:\n\016service_result\030\004 \001(\0132\".hbase.pb." +
|
||||||
":\n\016service_result\030\004 \001(\0132\".hbase.pb.Copro" +
|
"CoprocessorServiceResult\022,\n\tloadStats\030\005 " +
|
||||||
"cessorServiceResult\022,\n\tloadStats\030\005 \001(\0132\031" +
|
"\001(\0132\031.hbase.pb.RegionLoadStats\"x\n\022Region" +
|
||||||
".hbase.pb.RegionLoadStats\"x\n\022RegionActio",
|
"ActionResult\0226\n\021resultOrException\030\001 \003(\0132",
|
||||||
"nResult\0226\n\021resultOrException\030\001 \003(\0132\033.hba" +
|
"\033.hbase.pb.ResultOrException\022*\n\texceptio" +
|
||||||
"se.pb.ResultOrException\022*\n\texception\030\002 \001" +
|
"n\030\002 \001(\0132\027.hbase.pb.NameBytesPair\"x\n\014Mult" +
|
||||||
"(\0132\027.hbase.pb.NameBytesPair\"x\n\014MultiRequ" +
|
"iRequest\022,\n\014regionAction\030\001 \003(\0132\026.hbase.p" +
|
||||||
"est\022,\n\014regionAction\030\001 \003(\0132\026.hbase.pb.Reg" +
|
"b.RegionAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tco" +
|
||||||
"ionAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tconditi" +
|
"ndition\030\003 \001(\0132\023.hbase.pb.Condition\"\\\n\rMu" +
|
||||||
"on\030\003 \001(\0132\023.hbase.pb.Condition\"\\\n\rMultiRe" +
|
"ltiResponse\0228\n\022regionActionResult\030\001 \003(\0132" +
|
||||||
"sponse\0228\n\022regionActionResult\030\001 \003(\0132\034.hba" +
|
"\034.hbase.pb.RegionActionResult\022\021\n\tprocess" +
|
||||||
"se.pb.RegionActionResult\022\021\n\tprocessed\030\002 " +
|
"ed\030\002 \001(\010*\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010T" +
|
||||||
"\001(\010*\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010TIMELI" +
|
"IMELINE\020\0012\203\004\n\rClientService\0222\n\003Get\022\024.hba" +
|
||||||
"NE\020\0012\203\004\n\rClientService\0222\n\003Get\022\024.hbase.pb",
|
"se.pb.GetRequest\032\025.hbase.pb.GetResponse\022",
|
||||||
".GetRequest\032\025.hbase.pb.GetResponse\022;\n\006Mu" +
|
";\n\006Mutate\022\027.hbase.pb.MutateRequest\032\030.hba" +
|
||||||
"tate\022\027.hbase.pb.MutateRequest\032\030.hbase.pb" +
|
"se.pb.MutateResponse\0225\n\004Scan\022\025.hbase.pb." +
|
||||||
".MutateResponse\0225\n\004Scan\022\025.hbase.pb.ScanR" +
|
"ScanRequest\032\026.hbase.pb.ScanResponse\022P\n\rB" +
|
||||||
"equest\032\026.hbase.pb.ScanResponse\022P\n\rBulkLo" +
|
"ulkLoadHFile\022\036.hbase.pb.BulkLoadHFileReq" +
|
||||||
"adHFile\022\036.hbase.pb.BulkLoadHFileRequest\032" +
|
"uest\032\037.hbase.pb.BulkLoadHFileResponse\022X\n" +
|
||||||
"\037.hbase.pb.BulkLoadHFileResponse\022X\n\013Exec" +
|
"\013ExecService\022#.hbase.pb.CoprocessorServi" +
|
||||||
"Service\022#.hbase.pb.CoprocessorServiceReq" +
|
"ceRequest\032$.hbase.pb.CoprocessorServiceR" +
|
||||||
"uest\032$.hbase.pb.CoprocessorServiceRespon" +
|
"esponse\022d\n\027ExecRegionServerService\022#.hba" +
|
||||||
"se\022d\n\027ExecRegionServerService\022#.hbase.pb" +
|
"se.pb.CoprocessorServiceRequest\032$.hbase." +
|
||||||
".CoprocessorServiceRequest\032$.hbase.pb.Co",
|
"pb.CoprocessorServiceResponse\0228\n\005Multi\022\026",
|
||||||
"processorServiceResponse\0228\n\005Multi\022\026.hbas" +
|
".hbase.pb.MultiRequest\032\027.hbase.pb.MultiR" +
|
||||||
"e.pb.MultiRequest\032\027.hbase.pb.MultiRespon" +
|
"esponseBB\n*org.apache.hadoop.hbase.proto" +
|
||||||
"seBB\n*org.apache.hadoop.hbase.protobuf.g" +
|
"buf.generatedB\014ClientProtosH\001\210\001\001\240\001\001"
|
||||||
"eneratedB\014ClientProtosH\001\210\001\001\240\001\001"
|
|
||||||
};
|
};
|
||||||
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
||||||
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
||||||
|
@@ -33411,7 +33278,7 @@ public final class ClientProtos {
 internal_static_hbase_pb_Get_fieldAccessorTable = new
 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
 internal_static_hbase_pb_Get_descriptor,
-new java.lang.String[] { "Row", "Column", "Attribute", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "StoreLimit", "StoreOffset", "ExistenceOnly", "ClosestRowBefore", "Consistency", });
+new java.lang.String[] { "Row", "Column", "Attribute", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "StoreLimit", "StoreOffset", "ExistenceOnly", "Consistency", });
 internal_static_hbase_pb_Result_descriptor =
 getDescriptor().getMessageTypes().get(4);
 internal_static_hbase_pb_Result_fieldAccessorTable = new
@@ -64,8 +64,7 @@ enum Consistency {
 /**
 * The protocol buffer version of Get.
 * Unless existence_only is specified, return all the requested data
-* for the row that matches exactly, or the one that immediately
-* precedes it if closest_row_before is specified.
+* for the row that matches exactly.
 */
 message Get {
 required bytes row = 1;
@@ -82,10 +81,6 @@ message Get {
 // the existence.
 optional bool existence_only = 10 [default = false];

-// If the row to get doesn't exist, return the
-// closest row before.
-optional bool closest_row_before = 11 [default = false];
-
 optional Consistency consistency = 12 [default = STRONG];
 }

@@ -675,10 +675,6 @@ public class RemoteHTable implements Table {
 return true;
 }

-public Result getRowOrBefore(byte[] row, byte[] family) throws IOException {
-throw new IOException("getRowOrBefore not supported");
-}
-
 @Override
 public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
 byte[] value, Put put) throws IOException {
@@ -115,20 +115,6 @@ public final class HTableWrapper implements Table {
 }
 }

-/**
-* @deprecated in 0.99 since setting clearBufferOnFail is deprecated.
-*/
-@Deprecated
-public Result getRowOrBefore(byte[] row, byte[] family)
-throws IOException {
-Scan scan = Scan.createGetClosestRowOrBeforeReverseScan(row);
-Result startRowResult = null;
-try (ResultScanner resultScanner = this.table.getScanner(scan)) {
-startRowResult = resultScanner.next();
-}
-return startRowResult;
-}
-
 public Result get(Get get) throws IOException {
 return table.get(get);
 }
@@ -19,14 +19,14 @@ import java.io.IOException;
 import java.util.List;
 import java.util.NavigableSet;

-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
@@ -54,9 +54,9 @@ import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
-import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.wal.WALKey;

 import com.google.common.collect.ImmutableList;

@@ -120,7 +120,7 @@ public abstract class BaseRegionObserver implements RegionObserver {
 @Override
 public void preSplit(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException {
 }

 @Override
 public void preSplit(ObserverContext<RegionCoprocessorEnvironment> c,
 byte[] splitRow) throws IOException {
@ -130,22 +130,22 @@ public abstract class BaseRegionObserver implements RegionObserver {
|
||||||
public void preSplitBeforePONR(ObserverContext<RegionCoprocessorEnvironment> ctx,
|
public void preSplitBeforePONR(ObserverContext<RegionCoprocessorEnvironment> ctx,
|
||||||
byte[] splitKey, List<Mutation> metaEntries) throws IOException {
|
byte[] splitKey, List<Mutation> metaEntries) throws IOException {
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void preSplitAfterPONR(
|
public void preSplitAfterPONR(
|
||||||
ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
|
ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void preRollBackSplit(ObserverContext<RegionCoprocessorEnvironment> ctx)
|
public void preRollBackSplit(ObserverContext<RegionCoprocessorEnvironment> ctx)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void postRollBackSplit(
|
public void postRollBackSplit(
|
||||||
ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
|
ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void postCompleteSplit(
|
public void postCompleteSplit(
|
||||||
ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
|
ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
|
||||||
|
@ -218,18 +218,6 @@ public abstract class BaseRegionObserver implements RegionObserver {
|
||||||
postCompact(e, store, resultFile);
|
postCompact(e, store, resultFile);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> e,
|
|
||||||
final byte [] row, final byte [] family, final Result result)
|
|
||||||
throws IOException {
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void postGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> e,
|
|
||||||
final byte [] row, final byte [] family, final Result result)
|
|
||||||
throws IOException {
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
|
public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
|
||||||
final Get get, final List<Cell> results) throws IOException {
|
final Get get, final List<Cell> results) throws IOException {
|
||||||
|
@ -253,12 +241,12 @@ public abstract class BaseRegionObserver implements RegionObserver {
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e,
|
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e,
|
||||||
final Put put, final WALEdit edit, final Durability durability) throws IOException {
|
final Put put, final WALEdit edit, final Durability durability) throws IOException {
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void postPut(final ObserverContext<RegionCoprocessorEnvironment> e,
|
public void postPut(final ObserverContext<RegionCoprocessorEnvironment> e,
|
||||||
final Put put, final WALEdit edit, final Durability durability) throws IOException {
|
final Put put, final WALEdit edit, final Durability durability) throws IOException {
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -278,7 +266,7 @@ public abstract class BaseRegionObserver implements RegionObserver {
|
||||||
final Delete delete, final WALEdit edit, final Durability durability)
|
final Delete delete, final WALEdit edit, final Durability durability)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void preBatchMutate(final ObserverContext<RegionCoprocessorEnvironment> c,
|
public void preBatchMutate(final ObserverContext<RegionCoprocessorEnvironment> c,
|
||||||
final MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
|
final MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
|
||||||
|
|
|
@@ -382,7 +382,7 @@ public interface RegionObserver extends Coprocessor {
   void preSplitBeforePONR(final ObserverContext<RegionCoprocessorEnvironment> ctx,
       byte[] splitKey, List<Mutation> metaEntries) throws IOException;
 
 
   /**
    * This will be called after PONR step as part of split transaction
    * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no
@@ -391,9 +391,9 @@
    * @throws IOException
    */
   void preSplitAfterPONR(final ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException;
 
   /**
    * This will be called before the roll back of the split region is completed
    * @param ctx
    * @throws IOException
    */
@@ -419,7 +419,7 @@
    * Called before the region is reported as closed to the master.
    * @param c the environment provided by the region server
    * @param abortRequested true if the region server is aborting
    * @throws IOException
    */
   void preClose(final ObserverContext<RegionCoprocessorEnvironment> c,
       boolean abortRequested) throws IOException;
@@ -432,40 +432,6 @@
   void postClose(final ObserverContext<RegionCoprocessorEnvironment> c,
       boolean abortRequested);
 
-  /**
-   * Called before a client makes a GetClosestRowBefore request.
-   * <p>
-   * Call CoprocessorEnvironment#bypass to skip default actions
-   * <p>
-   * Call CoprocessorEnvironment#complete to skip any subsequent chained
-   * coprocessors
-   * @param c the environment provided by the region server
-   * @param row the row
-   * @param family the family
-   * @param result The result to return to the client if default processing
-   * is bypassed. Can be modified. Will not be used if default processing
-   * is not bypassed.
-   * @throws IOException if an error occurred on the coprocessor
-   */
-  void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
-      final byte [] row, final byte [] family, final Result result)
-      throws IOException;
-
-  /**
-   * Called after a client makes a GetClosestRowBefore request.
-   * <p>
-   * Call CoprocessorEnvironment#complete to skip any subsequent chained
-   * coprocessors
-   * @param c the environment provided by the region server
-   * @param row the row
-   * @param family the desired family
-   * @param result the result to return to the client, modify as necessary
-   * @throws IOException if an error occurred on the coprocessor
-   */
-  void postGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
-      final byte [] row, final byte [] family, final Result result)
-      throws IOException;
-
   /**
    * Called before the client performs a Get
    * <p>
@@ -543,7 +509,7 @@
    * @param durability Persistence guarantee for this Put
    * @throws IOException if an error occurred on the coprocessor
    */
   void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
       final Put put, final WALEdit edit, final Durability durability)
       throws IOException;
 
@@ -558,7 +524,7 @@
    * @param durability Persistence guarantee for this Put
    * @throws IOException if an error occurred on the coprocessor
    */
   void postPut(final ObserverContext<RegionCoprocessorEnvironment> c,
       final Put put, final WALEdit edit, final Durability durability)
       throws IOException;
 
@@ -575,7 +541,7 @@
    * @param durability Persistence guarantee for this Delete
    * @throws IOException if an error occurred on the coprocessor
    */
   void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
       final Delete delete, final WALEdit edit, final Durability durability)
       throws IOException;
   /**
@@ -611,7 +577,7 @@
   void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
       final Delete delete, final WALEdit edit, final Durability durability)
       throws IOException;
 
   /**
    * This will be called for every batch mutation operation happening at the server. This will be
    * called after acquiring the locks on the mutating rows and after applying the proper timestamp
@@ -658,7 +624,7 @@
    * Called after the completion of batch put/delete and will be called even if the batch operation
    * fails
    * @param ctx
    * @param miniBatchOp
    * @param success true if batch operation is successful otherwise false.
    * @throws IOException
    */
@@ -679,7 +645,7 @@
    * @param compareOp the comparison operation
    * @param comparator the comparator
    * @param put data to put if check succeeds
    * @param result
    * @return the return value to return to client if bypassing default
    * processing
    * @throws IOException if an error occurred on the coprocessor
@@ -693,8 +659,8 @@
   /**
    * Called before checkAndPut but after acquiring rowlock.
    * <p>
    * <b>Note:</b> Caution to be taken for not doing any long time operation in this hook.
    * Row will be locked for longer time. Trying to acquire lock on another row, within this,
    * can lead to potential deadlock.
    * <p>
    * Call CoprocessorEnvironment#bypass to skip default actions
@@ -708,14 +674,14 @@
    * @param compareOp the comparison operation
    * @param comparator the comparator
    * @param put data to put if check succeeds
    * @param result
    * @return the return value to return to client if bypassing default
    * processing
    * @throws IOException if an error occurred on the coprocessor
    */
   boolean preCheckAndPutAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
       final byte[] row, final byte[] family, final byte[] qualifier, final CompareOp compareOp,
       final ByteArrayComparable comparator, final Put put,
       final boolean result) throws IOException;
 
   /**
@@ -754,7 +720,7 @@
    * @param compareOp the comparison operation
    * @param comparator the comparator
    * @param delete delete to commit if check succeeds
    * @param result
    * @return the value to return to client if bypassing default processing
    * @throws IOException if an error occurred on the coprocessor
    */
@@ -767,8 +733,8 @@
   /**
    * Called before checkAndDelete but after acquiring rowock.
    * <p>
    * <b>Note:</b> Caution to be taken for not doing any long time operation in this hook.
    * Row will be locked for longer time. Trying to acquire lock on another row, within this,
    * can lead to potential deadlock.
    * <p>
    * Call CoprocessorEnvironment#bypass to skip default actions
@@ -782,7 +748,7 @@
    * @param compareOp the comparison operation
    * @param comparator the comparator
    * @param delete delete to commit if check succeeds
    * @param result
    * @return the value to return to client if bypassing default processing
    * @throws IOException if an error occurred on the coprocessor
    */
@@ -877,8 +843,8 @@
   /**
    * Called before Append but after acquiring rowlock.
    * <p>
    * <b>Note:</b> Caution to be taken for not doing any long time operation in this hook.
    * Row will be locked for longer time. Trying to acquire lock on another row, within this,
    * can lead to potential deadlock.
    * <p>
    * Call CoprocessorEnvironment#bypass to skip default actions
@@ -927,14 +893,14 @@
   /**
    * Called before Increment but after acquiring rowlock.
    * <p>
    * <b>Note:</b> Caution to be taken for not doing any long time operation in this hook.
    * Row will be locked for longer time. Trying to acquire lock on another row, within this,
    * can lead to potential deadlock.
    * <p>
    * Call CoprocessorEnvironment#bypass to skip default actions
    * <p>
    * Call CoprocessorEnvironment#complete to skip any subsequent chained coprocessors
    *
    * @param c
    *          the environment provided by the region server
    * @param increment
@@ -1227,7 +1193,7 @@
    * Called before creation of Reader for a store file.
    * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no
    * effect in this hook.
    *
    * @param ctx the environment provided by the region server
    * @param fs fileystem to read from
    * @param p path to the file
@@ -1246,7 +1212,7 @@
 
   /**
    * Called after the creation of Reader for a store file.
    *
    * @param ctx the environment provided by the region server
    * @param fs fileystem to read from
    * @param p path to the file
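With the GetClosestRowBefore hooks removed from the interface above, coprocessors intercept point reads only through the surviving preGetOp/postGetOp hooks. A minimal sketch of an observer using the surviving hook follows; the class name and the logging it does are illustrative, not part of this commit.

import java.io.IOException;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative observer: point reads are watched via preGetOp; there is no
// longer a separate hook for closest-row-before requests.
public class GetLoggingObserver extends BaseRegionObserver {
  private static final Log LOG = LogFactory.getLog(GetLoggingObserver.class);

  @Override
  public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
      final Get get, final List<Cell> results) throws IOException {
    // Runs before the default read path; populate 'results' and call e.bypass()
    // to short-circuit it, or simply observe the request as done here.
    LOG.debug("Get on region " + e.getEnvironment().getRegion().getRegionInfo()
        .getRegionNameAsString() + " for row " + Bytes.toStringBinary(get.getRow()));
  }
}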
@@ -31,15 +31,14 @@ import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.ByteRange;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -384,85 +383,6 @@ public class DefaultMemStore implements MemStore {
     return result;
   }
 
-  /**
-   * @param state column/delete tracking state
-   */
-  @Override
-  public void getRowKeyAtOrBefore(final GetClosestRowBeforeTracker state) {
-    getRowKeyAtOrBefore(cellSet, state);
-    getRowKeyAtOrBefore(snapshot, state);
-  }
-
-  /*
-   * @param set
-   * @param state Accumulates deletes and candidates.
-   */
-  private void getRowKeyAtOrBefore(final NavigableSet<Cell> set,
-      final GetClosestRowBeforeTracker state) {
-    if (set.isEmpty()) {
-      return;
-    }
-    if (!walkForwardInSingleRow(set, state.getTargetKey(), state)) {
-      // Found nothing in row. Try backing up.
-      getRowKeyBefore(set, state);
-    }
-  }
-
-  /*
-   * Walk forward in a row from <code>firstOnRow</code>. Presumption is that
-   * we have been passed the first possible key on a row. As we walk forward
-   * we accumulate deletes until we hit a candidate on the row at which point
-   * we return.
-   * @param set
-   * @param firstOnRow First possible key on this row.
-   * @param state
-   * @return True if we found a candidate walking this row.
-   */
-  private boolean walkForwardInSingleRow(final SortedSet<Cell> set,
-      final Cell firstOnRow, final GetClosestRowBeforeTracker state) {
-    boolean foundCandidate = false;
-    SortedSet<Cell> tail = set.tailSet(firstOnRow);
-    if (tail.isEmpty()) return foundCandidate;
-    for (Iterator<Cell> i = tail.iterator(); i.hasNext();) {
-      Cell kv = i.next();
-      // Did we go beyond the target row? If so break.
-      if (state.isTooFar(kv, firstOnRow)) break;
-      if (state.isExpired(kv)) {
-        i.remove();
-        continue;
-      }
-      // If we added something, this row is a contender. break.
-      if (state.handle(kv)) {
-        foundCandidate = true;
-        break;
-      }
-    }
-    return foundCandidate;
-  }
-
-  /*
-   * Walk backwards through the passed set a row at a time until we run out of
-   * set or until we get a candidate.
-   * @param set
-   * @param state
-   */
-  private void getRowKeyBefore(NavigableSet<Cell> set,
-      final GetClosestRowBeforeTracker state) {
-    Cell firstOnRow = state.getTargetKey();
-    for (Member p = memberOfPreviousRow(set, state, firstOnRow);
-        p != null; p = memberOfPreviousRow(p.set, state, firstOnRow)) {
-      // Make sure we don't fall out of our table.
-      if (!state.isTargetTable(p.cell)) break;
-      // Stop looking if we've exited the better candidate range.
-      if (!state.isBetterCandidate(p.cell)) break;
-      // Make into firstOnRow
-      firstOnRow = new KeyValue(p.cell.getRowArray(), p.cell.getRowOffset(), p.cell.getRowLength(),
-          HConstants.LATEST_TIMESTAMP);
-      // If we find something, break;
-      if (walkForwardInSingleRow(p.set, firstOnRow, state)) break;
-    }
-  }
-
   /**
    * Only used by tests. TODO: Remove
    *
@@ -622,42 +542,6 @@ public class DefaultMemStore implements MemStore {
     return addedSize;
   }
 
-  /*
-   * Immutable data structure to hold member found in set and the set it was
-   * found in. Include set because it is carrying context.
-   */
-  private static class Member {
-    final Cell cell;
-    final NavigableSet<Cell> set;
-    Member(final NavigableSet<Cell> s, final Cell kv) {
-      this.cell = kv;
-      this.set = s;
-    }
-  }
-
-  /*
-   * @param set Set to walk back in. Pass a first in row or we'll return
-   * same row (loop).
-   * @param state Utility and context.
-   * @param firstOnRow First item on the row after the one we want to find a
-   * member in.
-   * @return Null or member of row previous to <code>firstOnRow</code>
-   */
-  private Member memberOfPreviousRow(NavigableSet<Cell> set,
-      final GetClosestRowBeforeTracker state, final Cell firstOnRow) {
-    NavigableSet<Cell> head = set.headSet(firstOnRow, false);
-    if (head.isEmpty()) return null;
-    for (Iterator<Cell> i = head.descendingIterator(); i.hasNext();) {
-      Cell found = i.next();
-      if (state.isExpired(found)) {
-        i.remove();
-        continue;
-      }
-      return new Member(head, found);
-    }
-    return null;
-  }
-
   /**
    * @return scanner on memstore and snapshot in this order.
    */
@@ -1,242 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import java.util.NavigableMap;
-import java.util.NavigableSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * State and utility processing {@link HRegion#getClosestRowBefore(byte[], byte[])}.
- * Like {@link ScanQueryMatcher} and {@link ScanDeleteTracker} but does not
- * implement the {@link DeleteTracker} interface since state spans rows (There
- * is no update nor reset method).
- */
-@InterfaceAudience.Private
-class GetClosestRowBeforeTracker {
-  private final KeyValue targetkey;
-  // Any cell w/ a ts older than this is expired.
-  private final long now;
-  private final long oldestUnexpiredTs;
-  private Cell candidate = null;
-  private final CellComparator cellComparator;
-  // Flag for whether we're doing getclosest on a metaregion.
-  private final boolean metaregion;
-  // Offset and length into targetkey demarking table name (if in a metaregion).
-  private final int rowoffset;
-  private final int tablenamePlusDelimiterLength;
-
-  // Deletes keyed by row.  Comparator compares on row portion of KeyValue only.
-  private final NavigableMap<Cell, NavigableSet<Cell>> deletes;
-
-  /**
-   * @param c
-   * @param kv Presume first on row: i.e. empty column, maximum timestamp and
-   * a type of Type.Maximum
-   * @param ttl Time to live in ms for this Store
-   * @param metaregion True if this is hbase:meta or -ROOT- region.
-   */
-  GetClosestRowBeforeTracker(final CellComparator c, final KeyValue kv,
-      final long ttl, final boolean metaregion) {
-    super();
-    this.metaregion = metaregion;
-    this.targetkey = kv;
-    // If we are in a metaregion, then our table name is the prefix on the
-    // targetkey.
-    this.rowoffset = kv.getRowOffset();
-    int l = -1;
-    if (metaregion) {
-      l = Bytes.searchDelimiterIndex(kv.getRowArray(), rowoffset, kv.getRowLength(),
-        HConstants.DELIMITER) - this.rowoffset;
-    }
-    this.tablenamePlusDelimiterLength = metaregion? l + 1: -1;
-    this.now = System.currentTimeMillis();
-    this.oldestUnexpiredTs = now - ttl;
-    this.cellComparator = c;
-    this.deletes = new TreeMap<Cell, NavigableSet<Cell>>(new CellComparator.RowComparator());
-  }
-
-  /*
-   * Add the specified KeyValue to the list of deletes.
-   * @param kv
-   */
-  private void addDelete(final Cell kv) {
-    NavigableSet<Cell> rowdeletes = this.deletes.get(kv);
-    if (rowdeletes == null) {
-      rowdeletes = new TreeSet<Cell>(this.cellComparator);
-      this.deletes.put(kv, rowdeletes);
-    }
-    rowdeletes.add(kv);
-  }
-
-  /*
-   * @param kv Adds candidate if nearer the target than previous candidate.
-   * @return True if updated candidate.
-   */
-  private boolean addCandidate(final Cell kv) {
-    if (!isDeleted(kv) && isBetterCandidate(kv)) {
-      this.candidate = kv;
-      return true;
-    }
-    return false;
-  }
-
-  boolean isBetterCandidate(final Cell contender) {
-    return this.candidate == null ||
-      (this.cellComparator.compareRows(this.candidate, contender) < 0 &&
-        this.cellComparator.compareRows(contender, this.targetkey) <= 0);
-  }
-
-  /*
-   * Check if specified KeyValue buffer has been deleted by a previously
-   * seen delete.
-   * @param kv
-   * @return true is the specified KeyValue is deleted, false if not
-   */
-  private boolean isDeleted(final Cell kv) {
-    if (this.deletes.isEmpty()) return false;
-    NavigableSet<Cell> rowdeletes = this.deletes.get(kv);
-    if (rowdeletes == null || rowdeletes.isEmpty()) return false;
-    return isDeleted(kv, rowdeletes);
-  }
-
-  /**
-   * Check if the specified KeyValue buffer has been deleted by a previously
-   * seen delete.
-   * @param kv
-   * @param ds
-   * @return True is the specified KeyValue is deleted, false if not
-   */
-  public boolean isDeleted(final Cell kv, final NavigableSet<Cell> ds) {
-    if (deletes == null || deletes.isEmpty()) return false;
-    for (Cell d: ds) {
-      long kvts = kv.getTimestamp();
-      long dts = d.getTimestamp();
-      if (CellUtil.isDeleteFamily(d)) {
-        if (kvts <= dts) return true;
-        continue;
-      }
-      // Check column
-      int ret = CellComparator.compareQualifiers(kv, d);
-      if (ret <= -1) {
-        // This delete is for an earlier column.
-        continue;
-      } else if (ret >= 1) {
-        // Beyond this kv.
-        break;
-      }
-      // Check Timestamp
-      if (kvts > dts) return false;
-
-      // Check Type
-      switch (KeyValue.Type.codeToType(d.getTypeByte())) {
-        case Delete: return kvts == dts;
-        case DeleteColumn: return true;
-        default: continue;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * @param cell
-   * @return true if the cell is expired
-   */
-  public boolean isExpired(final Cell cell) {
-    return cell.getTimestamp() < this.oldestUnexpiredTs ||
-      HStore.isCellTTLExpired(cell, this.oldestUnexpiredTs, this.now);
-  }
-
-  /*
-   * Handle keys whose values hold deletes.
-   * Add to the set of deletes and then if the candidate keys contain any that
-   * might match, then check for a match and remove it.  Implies candidates
-   * is made with a Comparator that ignores key type.
-   * @param kv
-   * @return True if we removed <code>k</code> from <code>candidates</code>.
-   */
-  boolean handleDeletes(final Cell kv) {
-    addDelete(kv);
-    boolean deleted = false;
-    if (!hasCandidate()) return deleted;
-    if (isDeleted(this.candidate)) {
-      this.candidate = null;
-      deleted = true;
-    }
-    return deleted;
-  }
-
-  /**
-   * Do right thing with passed key, add to deletes or add to candidates.
-   * @param kv
-   * @return True if we added a candidate
-   */
-  boolean handle(final Cell kv) {
-    if (CellUtil.isDelete(kv)) {
-      handleDeletes(kv);
-      return false;
-    }
-    return addCandidate(kv);
-  }
-
-  /**
-   * @return True if has candidate
-   */
-  public boolean hasCandidate() {
-    return this.candidate != null;
-  }
-
-  /**
-   * @return Best candidate or null.
-   */
-  public Cell getCandidate() {
-    return this.candidate;
-  }
-
-  public KeyValue getTargetKey() {
-    return this.targetkey;
-  }
-
-  /**
-   * @param kv Current kv
-   * @param firstOnRow on row kv.
-   * @return True if we went too far, past the target key.
-   */
-  boolean isTooFar(final Cell kv, final Cell firstOnRow) {
-    return this.cellComparator.compareRows(kv, firstOnRow) > 0;
-  }
-
-  boolean isTargetTable(final Cell kv) {
-    if (!metaregion) return true;
-    // Compare start of keys row.  Compare including delimiter.  Saves having
-    // to calculate where tablename ends in the candidate kv.
-    return Bytes.compareTo(this.targetkey.getRowArray(), this.rowoffset,
-      this.tablenamePlusDelimiterLength,
-      kv.getRowArray(), kv.getRowOffset(), this.tablenamePlusDelimiterLength) == 0;
-  }
-}
@@ -2430,38 +2430,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   // get() methods for client use.
   //////////////////////////////////////////////////////////////////////////////
 
-  @Override
-  public Result getClosestRowBefore(final byte [] row, final byte [] family) throws IOException {
-    if (coprocessorHost != null) {
-      Result result = new Result();
-      if (coprocessorHost.preGetClosestRowBefore(row, family, result)) {
-        return result;
-      }
-    }
-    // look across all the HStores for this region and determine what the
-    // closest key is across all column families, since the data may be sparse
-    checkRow(row, "getClosestRowBefore");
-    startRegionOperation(Operation.GET);
-    this.readRequestsCount.increment();
-    try {
-      Store store = getStore(family);
-      // get the closest key. (HStore.getRowKeyAtOrBefore can return null)
-      Cell key = store.getRowKeyAtOrBefore(row);
-      Result result = null;
-      if (key != null) {
-        Get get = new Get(CellUtil.cloneRow(key));
-        get.addFamily(family);
-        result = get(get);
-      }
-      if (coprocessorHost != null) {
-        coprocessorHost.postGetClosestRowBefore(row, family, result);
-      }
-      return result;
-    } finally {
-      closeRegionOperation(Operation.GET);
-    }
-  }
-
   @Override
   public RegionScanner getScanner(Scan scan) throws IOException {
     return getScanner(scan, null);
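With HRegion#getClosestRowBefore gone, the same lookup can be phrased server-side as a reversed, small scan over the Region, which is the pattern the test utility added further down in this commit uses. The following is a sketch under that assumption; the enclosing class and method name are placeholders, not HBase code.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionScanner;

// Placeholder holder class; only the method body reflects the replacement pattern.
public final class ClosestRowBeforeSketch {
  private ClosestRowBeforeSketch() {}

  public static Result closestRowBefore(Region region, byte[] row, byte[] family)
      throws IOException {
    Scan scan = new Scan(row);   // start at the asked-for row...
    scan.setReversed(true);      // ...and walk toward earlier rows
    scan.setSmall(true);
    scan.setCaching(1);
    scan.addFamily(family);
    try (RegionScanner scanner = region.getScanner(scan)) {
      List<Cell> cells = new ArrayList<Cell>(1);
      scanner.next(cells);       // at most the first (closest) row is needed
      return cells.isEmpty() ? null : Result.create(cells);
    }
  }
}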
@@ -1774,154 +1774,6 @@ public class HStore implements Store {
     return false;
   }
 
-  @Override
-  public Cell getRowKeyAtOrBefore(final byte[] row) throws IOException {
-    // If minVersions is set, we will not ignore expired KVs.
-    // As we're only looking for the latest matches, that should be OK.
-    // With minVersions > 0 we guarantee that any KV that has any version
-    // at all (expired or not) has at least one version that will not expire.
-    // Note that this method used to take a KeyValue as arguments. KeyValue
-    // can be back-dated, a row key cannot.
-    long ttlToUse = scanInfo.getMinVersions() > 0 ? Long.MAX_VALUE : this.scanInfo.getTtl();
-
-    KeyValue kv = new KeyValue(row, HConstants.LATEST_TIMESTAMP);
-
-    GetClosestRowBeforeTracker state = new GetClosestRowBeforeTracker(
-      this.comparator, kv, ttlToUse, this.getRegionInfo().isMetaRegion());
-    this.lock.readLock().lock();
-    try {
-      // First go to the memstore.  Pick up deletes and candidates.
-      this.memstore.getRowKeyAtOrBefore(state);
-      // Check if match, if we got a candidate on the asked for 'kv' row.
-      // Process each relevant store file. Run through from newest to oldest.
-      Iterator<StoreFile> sfIterator = this.storeEngine.getStoreFileManager()
-          .getCandidateFilesForRowKeyBefore(state.getTargetKey());
-      while (sfIterator.hasNext()) {
-        StoreFile sf = sfIterator.next();
-        sfIterator.remove(); // Remove sf from iterator.
-        boolean haveNewCandidate = rowAtOrBeforeFromStoreFile(sf, state);
-        Cell candidate = state.getCandidate();
-        // we have an optimization here which stops the search if we find exact match.
-        if (candidate != null && CellUtil.matchingRow(candidate, row)) {
-          return candidate;
-        }
-        if (haveNewCandidate) {
-          sfIterator = this.storeEngine.getStoreFileManager().updateCandidateFilesForRowKeyBefore(
-              sfIterator, state.getTargetKey(), candidate);
-        }
-      }
-      return state.getCandidate();
-    } finally {
-      this.lock.readLock().unlock();
-    }
-  }
-
-  /*
-   * Check an individual MapFile for the row at or before a given row.
-   * @param f
-   * @param state
-   * @throws IOException
-   * @return True iff the candidate has been updated in the state.
-   */
-  private boolean rowAtOrBeforeFromStoreFile(final StoreFile f,
-      final GetClosestRowBeforeTracker state)
-      throws IOException {
-    StoreFile.Reader r = f.getReader();
-    if (r == null) {
-      LOG.warn("StoreFile " + f + " has a null Reader");
-      return false;
-    }
-    if (r.getEntries() == 0) {
-      LOG.warn("StoreFile " + f + " is a empty store file");
-      return false;
-    }
-    // TODO: Cache these keys rather than make each time?
-    Cell firstKV = r.getFirstKey();
-    if (firstKV == null) return false;
-    Cell lastKV = r.getLastKey();
-    Cell firstOnRow = state.getTargetKey();
-    if (this.comparator.compareRows(lastKV, firstOnRow) < 0) {
-      // If last key in file is not of the target table, no candidates in this
-      // file.  Return.
-      if (!state.isTargetTable(lastKV)) return false;
-      // If the row we're looking for is past the end of file, set search key to
-      // last key. TODO: Cache last and first key rather than make each time.
-      firstOnRow = CellUtil.createFirstOnRow(lastKV);
-    }
-    // Get a scanner that caches blocks and that uses pread.
-    HFileScanner scanner = r.getScanner(true, true, false);
-    // Seek scanner.  If can't seek it, return.
-    if (!seekToScanner(scanner, firstOnRow, firstKV)) return false;
-    // If we found candidate on firstOnRow, just return. THIS WILL NEVER HAPPEN!
-    // Unlikely that there'll be an instance of actual first row in table.
-    if (walkForwardInSingleRow(scanner, firstOnRow, state)) return true;
-    // If here, need to start backing up.
-    while (scanner.seekBefore(firstOnRow)) {
-      Cell kv = scanner.getCell();
-      if (!state.isTargetTable(kv)) break;
-      if (!state.isBetterCandidate(kv)) break;
-      // Make new first on row.
-      firstOnRow = CellUtil.createFirstOnRow(kv);
-      // Seek scanner.  If can't seek it, break.
-      if (!seekToScanner(scanner, firstOnRow, firstKV)) return false;
-      // If we find something, break;
-      if (walkForwardInSingleRow(scanner, firstOnRow, state)) return true;
-    }
-    return false;
-  }
-
-  /*
-   * Seek the file scanner to firstOnRow or first entry in file.
-   * @param scanner
-   * @param firstOnRow
-   * @param firstKV
-   * @return True if we successfully seeked scanner.
-   * @throws IOException
-   */
-  private boolean seekToScanner(final HFileScanner scanner,
-      final Cell firstOnRow,
-      final Cell firstKV)
-      throws IOException {
-    Cell kv = firstOnRow;
-    // If firstOnRow < firstKV, set to firstKV
-    if (this.comparator.compareRows(firstKV, firstOnRow) == 0) kv = firstKV;
-    int result = scanner.seekTo(kv);
-    return result != -1;
-  }
-
-  /*
-   * When we come in here, we are probably at the kv just before we break into
-   * the row that firstOnRow is on.  Usually need to increment one time to get
-   * on to the row we are interested in.
-   * @param scanner
-   * @param firstOnRow
-   * @param state
-   * @return True we found a candidate.
-   * @throws IOException
-   */
-  private boolean walkForwardInSingleRow(final HFileScanner scanner,
-      final Cell firstOnRow,
-      final GetClosestRowBeforeTracker state)
-      throws IOException {
-    boolean foundCandidate = false;
-    do {
-      Cell kv = scanner.getCell();
-      // If we are not in the row, skip.
-      if (this.comparator.compareRows(kv, firstOnRow) < 0) continue;
-      // Did we go beyond the target row? If so break.
-      if (state.isTooFar(kv, firstOnRow)) break;
-      if (state.isExpired(kv)) {
-        continue;
-      }
-      // If we added something, this row is a contender. break.
-      if (state.handle(kv)) {
-        foundCandidate = true;
-        break;
-      }
-    } while(scanner.next());
-    return foundCandidate;
-  }
-
   @Override
   public boolean canSplit() {
     this.lock.readLock().lock();
@@ -92,13 +92,6 @@ public interface MemStore extends HeapSize {
    */
   long delete(final Cell deleteCell);
 
-  /**
-   * Find the key that matches <i>row</i> exactly, or the one that immediately precedes it. The
-   * target row key is set in state.
-   * @param state column/delete tracking state
-   */
-  void getRowKeyAtOrBefore(final GetClosestRowBeforeTracker state);
-
   /**
    * Given the specs of a column, update it, first by inserting a new record,
    * then removing the old one.  Since there is only 1 KeyValue involved, the memstoreTS
@@ -1933,32 +1933,21 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.GET);
 
-      if (get.hasClosestRowBefore() && get.getClosestRowBefore()) {
-        if (get.getColumnCount() != 1) {
-          throw new DoNotRetryIOException(
-            "get ClosestRowBefore supports one and only one family now, not "
-              + get.getColumnCount() + " families");
-        }
-        byte[] row = get.getRow().toByteArray();
-        byte[] family = get.getColumn(0).getFamily().toByteArray();
-        r = region.getClosestRowBefore(row, family);
-      } else {
-        Get clientGet = ProtobufUtil.toGet(get);
-        if (get.getExistenceOnly() && region.getCoprocessorHost() != null) {
-          existence = region.getCoprocessorHost().preExists(clientGet);
-        }
-        if (existence == null) {
-          r = region.get(clientGet);
-          if (get.getExistenceOnly()) {
-            boolean exists = r.getExists();
-            if (region.getCoprocessorHost() != null) {
-              exists = region.getCoprocessorHost().postExists(clientGet, exists);
-            }
-            existence = exists;
-          }
-        }
-      }
-      if (existence != null){
+      Get clientGet = ProtobufUtil.toGet(get);
+      if (get.getExistenceOnly() && region.getCoprocessorHost() != null) {
+        existence = region.getCoprocessorHost().preExists(clientGet);
+      }
+      if (existence == null) {
+        r = region.get(clientGet);
+        if (get.getExistenceOnly()) {
+          boolean exists = r.getExists();
+          if (region.getCoprocessorHost() != null) {
+            exists = region.getCoprocessorHost().postExists(clientGet, exists);
+          }
+          existence = exists;
+        }
+      }
+      if (existence != null) {
         ClientProtos.Result pbr =
             ProtobufUtil.toResult(existence, region.getRegionInfo().getReplicaId() != 0);
         builder.setResult(pbr);
@@ -1974,8 +1963,7 @@
       throw new ServiceException(ie);
     } finally {
       if (regionServer.metricsRegionServer != null) {
-        regionServer.metricsRegionServer.updateGet(
-          EnvironmentEdgeManager.currentTime() - before);
+        regionServer.metricsRegionServer.updateGet(EnvironmentEdgeManager.currentTime() - before);
       }
       if (quota != null) {
         quota.close();
@@ -380,17 +380,6 @@ public interface Region extends ConfigurationObserver {
    */
   List<Cell> get(Get get, boolean withCoprocessor) throws IOException;
 
-  /**
-   * Return all the data for the row that matches <i>row</i> exactly,
-   * or the one that immediately preceeds it, at or immediately before
-   * <i>ts</i>.
-   * @param row
-   * @param family
-   * @return result of the operation
-   * @throws IOException
-   */
-  Result getClosestRowBefore(byte[] row, byte[] family) throws IOException;
-
   /**
    * Return an iterator that scans over the HRegion, returning the indicated
    * columns and rows specified by the {@link Scan}.
@@ -782,41 +782,6 @@ public class RegionCoprocessorHost
 
   // RegionObserver support
 
-  /**
-   * @param row the row key
-   * @param family the family
-   * @param result the result set from the region
-   * @return true if default processing should be bypassed
-   * @exception IOException Exception
-   */
-  public boolean preGetClosestRowBefore(final byte[] row, final byte[] family,
-      final Result result) throws IOException {
-    return execOperation(coprocessors.isEmpty() ? null : new RegionOperation() {
-      @Override
-      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-          throws IOException {
-        oserver.preGetClosestRowBefore(ctx, row, family, result);
-      }
-    });
-  }
-
-  /**
-   * @param row the row key
-   * @param family the family
-   * @param result the result set from the region
-   * @exception IOException Exception
-   */
-  public void postGetClosestRowBefore(final byte[] row, final byte[] family,
-      final Result result) throws IOException {
-    execOperation(coprocessors.isEmpty() ? null : new RegionOperation() {
-      @Override
-      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-          throws IOException {
-        oserver.postGetClosestRowBefore(ctx, row, family, result);
-      }
-    });
-  }
-
   /**
    * @param get the Get request
    * @return true if default processing should be bypassed
@@ -143,20 +143,6 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
    */
   void rollback(final Cell cell);
 
-  /**
-   * Find the key that matches <i>row</i> exactly, or the one that immediately precedes it. WARNING:
-   * Only use this method on a table where writes occur with strictly increasing timestamps. This
-   * method assumes this pattern of writes in order to make it reasonably performant. Also our
-   * search is dependent on the axiom that deletes are for cells that are in the container that
-   * follows whether a memstore snapshot or a storefile, not for the current container: i.e. we'll
-   * see deletes before we come across cells we are to delete. Presumption is that the
-   * memstore#kvset is processed before memstore#snapshot and so on.
-   * @param row The row key of the targeted row.
-   * @return Found Cell or null if none found.
-   * @throws IOException
-   */
-  Cell getRowKeyAtOrBefore(final byte[] row) throws IOException;
-
   FileSystem getFileSystem();
 
   /*
@@ -661,7 +661,6 @@ public class AccessController extends BaseMasterAndRegionObserver
   }
 
   private enum OpType {
-    GET_CLOSEST_ROW_BEFORE("getClosestRowBefore"),
     GET("get"),
     EXISTS("exists"),
     SCAN("scan"),
@@ -1424,28 +1423,6 @@
     return scanner;
   }
 
-  @Override
-  public void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
-      final byte [] row, final byte [] family, final Result result)
-      throws IOException {
-    assert family != null;
-    RegionCoprocessorEnvironment env = c.getEnvironment();
-    Map<byte[],? extends Collection<byte[]>> families = makeFamilyMap(family, null);
-    User user = getActiveUser();
-    AuthResult authResult = permissionGranted(OpType.GET_CLOSEST_ROW_BEFORE, user, env, families,
-      Action.READ);
-    if (!authResult.isAllowed() && cellFeaturesEnabled && !compatibleEarlyTermination) {
-      authResult.setAllowed(checkCoveringPermission(OpType.GET_CLOSEST_ROW_BEFORE, env, row,
-        families, HConstants.LATEST_TIMESTAMP, Action.READ));
-      authResult.setReason("Covering cell set");
-    }
-    logResult(authResult);
-    if (authorizationEnabled && !authResult.isAllowed()) {
-      throw new AccessDeniedException("Insufficient permissions " +
-        authResult.toContextString());
-    }
-  }
-
   private void internalPreRead(final ObserverContext<RegionCoprocessorEnvironment> c,
       final Query query, OpType opType) throws IOException {
     Filter filter = query.getFilter();
@@ -92,6 +92,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
@@ -4155,4 +4156,28 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     }
     return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
   }
+
+  public Result getClosestRowBefore(Region r, byte[] row, byte[] family) throws IOException {
+    Scan scan = new Scan(row);
+    scan.setSmall(true);
+    scan.setCaching(1);
+    scan.setReversed(true);
+    scan.addFamily(family);
+    try (RegionScanner scanner = r.getScanner(scan)) {
+      List<Cell> cells = new ArrayList<Cell>(1);
+      scanner.next(cells);
+      if (r.getRegionInfo().isMetaRegion() && !isTargetTable(row, cells.get(0))) {
+        return null;
+      }
+      return Result.create(cells);
+    }
+  }
+
+  private boolean isTargetTable(final byte[] inRow, Cell c) {
+    String inputRowString = Bytes.toString(inRow);
+    int i = inputRowString.indexOf(HConstants.DELIMITER);
+    String outputRowString = Bytes.toString(c.getRowArray(), c.getRowOffset(), c.getRowLength());
+    int o = outputRowString.indexOf(HConstants.DELIMITER);
+    return inputRowString.substring(0, i).equals(outputRowString.substring(0, o));
+  }
 }
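For orientation, here is a hedged sketch of how a test might call the new getClosestRowBefore helper; it mirrors the TestGetClosestAtOrBefore changes further below, but the class name, column family, and row keys in this snippet are illustrative placeholders, not part of the patch.

import static org.junit.Assert.assertTrue;

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.util.Bytes;

public class ClosestRowBeforeHelperSketch {
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

  // Hypothetical check against an already-created test region; "info" and the
  // row keys are placeholders.
  static void verifyClosestRowBefore(Region region) throws Exception {
    byte[] cf  = Bytes.toBytes("info");
    byte[] r10 = Bytes.toBytes("010");
    byte[] r20 = Bytes.toBytes("020");

    region.put(new Put(r10).addColumn(cf, cf, r10));

    // Exact match: the helper returns the requested row itself.
    Result r = UTIL.getClosestRowBefore(region, r10, cf);
    assertTrue(Bytes.equals(r10, r.getRow()));

    // No exact match: it falls back to the closest preceding row.
    r = UTIL.getClosestRowBefore(region, r20, cf);
    assertTrue(Bytes.equals(r10, r.getRow()));
  }
}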
@@ -4209,83 +4209,56 @@ public class TestFromClientSide {
     region.flush(true);
 
     Result result;
-    Get get = null;
 
     // Test before first that null is returned
-    get = new Get(beforeFirstRow);
-    get.setClosestRowBefore(true);
-    get.addFamily(HConstants.CATALOG_FAMILY);
-    result = table.get(get);
-    assertTrue(result.isEmpty());
+    result = getReverseScanResult(table, beforeFirstRow,
+      HConstants.CATALOG_FAMILY);
+    assertNull(result);
 
     // Test at first that first is returned
-    get = new Get(firstRow);
-    get.setClosestRowBefore(true);
-    get.addFamily(HConstants.CATALOG_FAMILY);
-    result = table.get(get);
+    result = getReverseScanResult(table, firstRow, HConstants.CATALOG_FAMILY);
     assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
     assertTrue(Bytes.equals(result.getRow(), firstRow));
     assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one));
 
     // Test in between first and second that first is returned
-    get = new Get(beforeSecondRow);
-    get.setClosestRowBefore(true);
-    get.addFamily(HConstants.CATALOG_FAMILY);
-    result = table.get(get);
+    result = getReverseScanResult(table, beforeSecondRow, HConstants.CATALOG_FAMILY);
     assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
     assertTrue(Bytes.equals(result.getRow(), firstRow));
     assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one));
 
     // Test at second make sure second is returned
-    get = new Get(secondRow);
-    get.setClosestRowBefore(true);
-    get.addFamily(HConstants.CATALOG_FAMILY);
-    result = table.get(get);
+    result = getReverseScanResult(table, secondRow, HConstants.CATALOG_FAMILY);
     assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
     assertTrue(Bytes.equals(result.getRow(), secondRow));
     assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), two));
 
     // Test in second and third, make sure second is returned
-    get = new Get(beforeThirdRow);
-    get.setClosestRowBefore(true);
-    get.addFamily(HConstants.CATALOG_FAMILY);
-    result = table.get(get);
+    result = getReverseScanResult(table, beforeThirdRow, HConstants.CATALOG_FAMILY);
     assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
     assertTrue(Bytes.equals(result.getRow(), secondRow));
     assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), two));
 
     // Test at third make sure third is returned
-    get = new Get(thirdRow);
-    get.setClosestRowBefore(true);
-    get.addFamily(HConstants.CATALOG_FAMILY);
-    result = table.get(get);
+    result = getReverseScanResult(table, thirdRow, HConstants.CATALOG_FAMILY);
     assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
     assertTrue(Bytes.equals(result.getRow(), thirdRow));
     assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), three));
 
     // Test in third and forth, make sure third is returned
-    get = new Get(beforeForthRow);
-    get.setClosestRowBefore(true);
-    get.addFamily(HConstants.CATALOG_FAMILY);
-    result = table.get(get);
+    result = getReverseScanResult(table, beforeForthRow, HConstants.CATALOG_FAMILY);
     assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
     assertTrue(Bytes.equals(result.getRow(), thirdRow));
     assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), three));
 
     // Test at forth make sure forth is returned
-    get = new Get(forthRow);
-    get.setClosestRowBefore(true);
-    get.addFamily(HConstants.CATALOG_FAMILY);
-    result = table.get(get);
+    result = getReverseScanResult(table, forthRow, HConstants.CATALOG_FAMILY);
     assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
     assertTrue(Bytes.equals(result.getRow(), forthRow));
     assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), four));
 
     // Test after forth make sure forth is returned
-    get = new Get(Bytes.add(forthRow, one));
-    get.setClosestRowBefore(true);
-    get.addFamily(HConstants.CATALOG_FAMILY);
-    result = table.get(get);
+    result = getReverseScanResult(table, Bytes.add(forthRow, one), HConstants.CATALOG_FAMILY);
     assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
     assertTrue(Bytes.equals(result.getRow(), forthRow));
     assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), four));
@@ -4293,6 +4266,17 @@ public class TestFromClientSide {
     }
   }
 
+  private Result getReverseScanResult(Table table, byte[] row, byte[] fam) throws IOException {
+    Scan scan = new Scan(row);
+    scan.setSmall(true);
+    scan.setReversed(true);
+    scan.setCaching(1);
+    scan.addFamily(fam);
+    try (ResultScanner scanner = table.getScanner(scan)) {
+      return scanner.next();
+    }
+  }
+
   /**
    * For HBASE-2156
    * @throws Exception
@@ -85,7 +85,6 @@ public class TestFromClientSideNoCodec {
     // Check getRowOrBefore
     byte[] f = fs[0];
     Get get = new Get(row);
-    get.setClosestRowBefore(true);
     get.addFamily(f);
     r = ht.get(get);
     assertTrue(r.toString(), r.containsColumn(f, f));
@@ -102,8 +102,6 @@ public class SimpleRegionObserver extends BaseRegionObserver {
   final AtomicInteger ctPreDeleted = new AtomicInteger(0);
   final AtomicInteger ctPrePrepareDeleteTS = new AtomicInteger(0);
   final AtomicInteger ctPostDeleted = new AtomicInteger(0);
-  final AtomicInteger ctPreGetClosestRowBefore = new AtomicInteger(0);
-  final AtomicInteger ctPostGetClosestRowBefore = new AtomicInteger(0);
   final AtomicInteger ctPreIncrement = new AtomicInteger(0);
   final AtomicInteger ctPreIncrementAfterRowLock = new AtomicInteger(0);
   final AtomicInteger ctPreAppend = new AtomicInteger(0);
@@ -517,32 +515,6 @@ public class SimpleRegionObserver extends BaseRegionObserver {
     ctPostBatchMutateIndispensably.incrementAndGet();
   }
 
-  @Override
-  public void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
-      final byte[] row, final byte[] family, final Result result)
-      throws IOException {
-    RegionCoprocessorEnvironment e = c.getEnvironment();
-    assertNotNull(e);
-    assertNotNull(e.getRegion());
-    assertNotNull(row);
-    assertNotNull(result);
-    if (ctBeforeDelete.get() > 0) {
-      ctPreGetClosestRowBefore.incrementAndGet();
-    }
-  }
-
-  @Override
-  public void postGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
-      final byte[] row, final byte[] family, final Result result)
-      throws IOException {
-    RegionCoprocessorEnvironment e = c.getEnvironment();
-    assertNotNull(e);
-    assertNotNull(e.getRegion());
-    assertNotNull(row);
-    assertNotNull(result);
-    ctPostGetClosestRowBefore.incrementAndGet();
-  }
-
   @Override
   public Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> c,
       final Increment increment) throws IOException {
@@ -940,14 +912,6 @@ public class SimpleRegionObserver extends BaseRegionObserver {
     return ctPostDeleted.get();
   }
 
-  public int getCtPreGetClosestRowBefore() {
-    return ctPreGetClosestRowBefore.get();
-  }
-
-  public int getCtPostGetClosestRowBefore() {
-    return ctPostGetClosestRowBefore.get();
-  }
-
   public int getCtPreIncrement() {
     return ctPreIncrement.get();
   }
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Delete;
@@ -66,6 +65,8 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
   private static final byte[] T35 = Bytes.toBytes("035");
   private static final byte[] T40 = Bytes.toBytes("040");
 
+  private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
 
   @Test
@@ -160,7 +161,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
           tableb, tofindBytes,
           HConstants.NINES, false);
       LOG.info("find=" + new String(metaKey));
-      Result r = mr.getClosestRowBefore(metaKey, HConstants.CATALOG_FAMILY);
+      Result r = UTIL.getClosestRowBefore(mr, metaKey, HConstants.CATALOG_FAMILY);
       if (answer == -1) {
         assertNull(r);
         return null;
@@ -206,38 +207,38 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
       p.add(c0, c0, T20);
       region.put(p);
 
-      Result r = region.getClosestRowBefore(T20, c0);
+      Result r = UTIL.getClosestRowBefore(region, T20, c0);
       assertTrue(Bytes.equals(T20, r.getRow()));
 
       Delete d = new Delete(T20);
       d.deleteColumn(c0, c0);
       region.delete(d);
 
-      r = region.getClosestRowBefore(T20, c0);
+      r = UTIL.getClosestRowBefore(region, T20, c0);
       assertTrue(Bytes.equals(T10, r.getRow()));
 
       p = new Put(T30);
       p.add(c0, c0, T30);
       region.put(p);
 
-      r = region.getClosestRowBefore(T30, c0);
+      r = UTIL.getClosestRowBefore(region, T30, c0);
       assertTrue(Bytes.equals(T30, r.getRow()));
 
       d = new Delete(T30);
       d.deleteColumn(c0, c0);
       region.delete(d);
 
-      r = region.getClosestRowBefore(T30, c0);
+      r = UTIL.getClosestRowBefore(region, T30, c0);
       assertTrue(Bytes.equals(T10, r.getRow()));
-      r = region.getClosestRowBefore(T31, c0);
+      r = UTIL.getClosestRowBefore(region, T31, c0);
       assertTrue(Bytes.equals(T10, r.getRow()));
 
       region.flush(true);
 
       // try finding "010" after flush
-      r = region.getClosestRowBefore(T30, c0);
+      r = UTIL.getClosestRowBefore(region, T30, c0);
       assertTrue(Bytes.equals(T10, r.getRow()));
-      r = region.getClosestRowBefore(T31, c0);
+      r = UTIL.getClosestRowBefore(region, T31, c0);
       assertTrue(Bytes.equals(T10, r.getRow()));
 
       // Put into a different column family. Should make it so I still get t10
@@ -245,16 +246,16 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
       p.add(c1, c1, T20);
       region.put(p);
 
-      r = region.getClosestRowBefore(T30, c0);
+      r = UTIL.getClosestRowBefore(region, T30, c0);
       assertTrue(Bytes.equals(T10, r.getRow()));
-      r = region.getClosestRowBefore(T31, c0);
+      r = UTIL.getClosestRowBefore(region, T31, c0);
       assertTrue(Bytes.equals(T10, r.getRow()));
 
       region.flush(true);
 
-      r = region.getClosestRowBefore(T30, c0);
+      r = UTIL.getClosestRowBefore(region, T30, c0);
       assertTrue(Bytes.equals(T10, r.getRow()));
-      r = region.getClosestRowBefore(T31, c0);
+      r = UTIL.getClosestRowBefore(region, T31, c0);
       assertTrue(Bytes.equals(T10, r.getRow()));
 
       // Now try combo of memcache and mapfiles. Delete the t20 COLUMS[1]
@@ -262,14 +263,14 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
       d = new Delete(T20);
       d.deleteColumn(c1, c1);
       region.delete(d);
-      r = region.getClosestRowBefore(T30, c0);
+      r = UTIL.getClosestRowBefore(region, T30, c0);
       assertTrue(Bytes.equals(T10, r.getRow()));
 
       // Ask for a value off the end of the file. Should return t10.
-      r = region.getClosestRowBefore(T31, c0);
+      r = UTIL.getClosestRowBefore(region, T31, c0);
       assertTrue(Bytes.equals(T10, r.getRow()));
       region.flush(true);
-      r = region.getClosestRowBefore(T31, c0);
+      r = UTIL.getClosestRowBefore(region, T31, c0);
       assertTrue(Bytes.equals(T10, r.getRow()));
 
       // Ok. Let the candidate come out of hfile but have delete of
@@ -279,7 +280,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
       region.put(p);
       d = new Delete(T10);
       d.deleteColumn(c1, c1);
-      r = region.getClosestRowBefore(T12, c0);
+      r = UTIL.getClosestRowBefore(region, T12, c0);
       assertTrue(Bytes.equals(T11, r.getRow()));
     } finally {
       if (region != null) {
@@ -316,13 +317,13 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
       region.put(p);
 
       // try finding "035"
-      Result r = region.getClosestRowBefore(T35, c0);
+      Result r = UTIL.getClosestRowBefore(region, T35, c0);
       assertTrue(Bytes.equals(T30, r.getRow()));
 
       region.flush(true);
 
       // try finding "035"
-      r = region.getClosestRowBefore(T35, c0);
+      r = UTIL.getClosestRowBefore(region, T35, c0);
       assertTrue(Bytes.equals(T30, r.getRow()));
 
       p = new Put(T20);
@@ -330,13 +331,13 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
       region.put(p);
 
       // try finding "035"
-      r = region.getClosestRowBefore(T35, c0);
+      r = UTIL.getClosestRowBefore(region, T35, c0);
       assertTrue(Bytes.equals(T30, r.getRow()));
 
       region.flush(true);
 
       // try finding "035"
-      r = region.getClosestRowBefore(T35, c0);
+      r = UTIL.getClosestRowBefore(region, T35, c0);
       assertTrue(Bytes.equals(T30, r.getRow()));
     } finally {
       if (region != null) {
@@ -89,20 +89,20 @@ public class TestMinVersions {
       // now make sure that getClosestBefore(...) get can
       // rows that would be expired without minVersion.
       // also make sure it gets the latest version
-      Result r = region.getClosestRowBefore(T1, c0);
+      Result r = hbu.getClosestRowBefore(region, T1, c0);
       checkResult(r, c0, T4);
 
-      r = region.getClosestRowBefore(T2, c0);
+      r = hbu.getClosestRowBefore(region, T2, c0);
       checkResult(r, c0, T4);
 
       // now flush/compact
       region.flush(true);
       region.compact(true);
 
-      r = region.getClosestRowBefore(T1, c0);
+      r = hbu.getClosestRowBefore(region, T1, c0);
       checkResult(r, c0, T4);
 
-      r = region.getClosestRowBefore(T2, c0);
+      r = hbu.getClosestRowBefore(region, T2, c0);
       checkResult(r, c0, T4);
     } finally {
       HBaseTestingUtility.closeRegionAndWAL(region);
@@ -879,16 +879,6 @@ public class TestWithDisabledAuthorization extends SecureTestUtil {
       }
     }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE);
 
-    // preGetClosestRowBefore
-    verifyAllowed(new AccessTestAction() {
-      @Override
-      public Object run() throws Exception {
-        ACCESS_CONTROLLER.preGetClosestRowBefore(ObserverContext.createAndPrepare(RCP_ENV, null),
-          TEST_ROW, TEST_FAMILY, new Result());
-        return null;
-      }
-    }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE);
-
     // preGetOp
     verifyAllowed(new AccessTestAction() {
       @Override
@@ -1610,25 +1610,12 @@ public class ThriftServerRunner implements Runnable {
        }
      }
 
-    @Deprecated
-    @Override
-    public List<TCell> getRowOrBefore(ByteBuffer tableName, ByteBuffer row,
-        ByteBuffer family) throws IOError {
-      try {
-        Result result = getRowOrBefore(getBytes(tableName), getBytes(row), getBytes(family));
-        return ThriftUtilities.cellFromHBase(result.rawCells());
-      } catch (IOException e) {
-        LOG.warn(e.getMessage(), e);
-        throw new IOError(Throwables.getStackTraceAsString(e));
-      }
-    }
-
     @Override
     public TRegionInfo getRegionInfo(ByteBuffer searchRow) throws IOError {
       try {
         byte[] row = getBytes(searchRow);
-        Result startRowResult =
-            getRowOrBefore(TableName.META_TABLE_NAME.getName(), row, HConstants.CATALOG_FAMILY);
+        Result startRowResult = getReverseScanResult(TableName.META_TABLE_NAME.getName(), row,
+          HConstants.CATALOG_FAMILY);
 
         if (startRowResult == null) {
           throw new IOException("Cannot find row in "+ TableName.META_TABLE_NAME+", row="
@@ -1662,7 +1649,8 @@ public class ThriftServerRunner implements Runnable {
       }
     }
 
-    private Result getRowOrBefore(byte[] tableName, byte[] row, byte[] family) throws IOException {
+    private Result getReverseScanResult(byte[] tableName, byte[] row, byte[] family)
+        throws IOException {
       Scan scan = new Scan(row);
       scan.setReversed(true);
       scan.addFamily(family);
[File diff suppressed because it is too large]
@@ -906,22 +906,6 @@ service Hbase {
     1:ScannerID id
   ) throws (1:IOError io, 2:IllegalArgument ia)
 
-  /**
-   * Get the row just before the specified one.
-   *
-   * @return value for specified row/column
-   */
-  list<TCell> getRowOrBefore(
-    /** name of table */
-    1:Text tableName,
-
-    /** row key */
-    2:Text row,
-
-    /** column name */
-    3:Text family
-  ) throws (1:IOError io)
-
   /**
    * Get the regininfo for the specified row. It scans
    * the metatable to find region's start and end keys.