HBASE-17320 Add inclusive/exclusive support for startRow and endRow of scan

zhangduo 2016-12-28 21:35:50 +08:00
parent a3e0e0df0d
commit 05b1d918b0
30 changed files with 1103 additions and 503 deletions

AsyncClientScanner.java

@@ -19,7 +19,7 @@ package org.apache.hadoop.hbase.client;
 import static org.apache.hadoop.hbase.HConstants.EMPTY_END_ROW;
 import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStartRow;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.getLocateType;

 import java.io.IOException;
 import java.util.concurrent.CompletableFuture;
@@ -64,10 +64,10 @@ class AsyncClientScanner {
 public AsyncClientScanner(Scan scan, RawScanResultConsumer consumer, TableName tableName,
 AsyncConnectionImpl conn, long scanTimeoutNs, long rpcTimeoutNs) {
 if (scan.getStartRow() == null) {
-scan.setStartRow(EMPTY_START_ROW);
+scan.withStartRow(EMPTY_START_ROW, scan.includeStartRow());
 }
 if (scan.getStopRow() == null) {
-scan.setStopRow(EMPTY_END_ROW);
+scan.withStopRow(EMPTY_END_ROW, scan.includeStopRow());
 }
 this.scan = scan;
 this.consumer = consumer;
@@ -117,23 +117,22 @@ class AsyncClientScanner {
 conn.callerFactory.scanSingleRegion().id(resp.scannerId).location(resp.loc).stub(resp.stub)
 .setScan(scan).consumer(consumer).resultCache(resultCache)
 .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
-.scanTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).start()
-.whenComplete((locateType, error) -> {
+.scanTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).start().whenComplete((hasMore, error) -> {
 if (error != null) {
 consumer.onError(error);
 return;
 }
-if (locateType == null) {
-consumer.onComplete();
+if (hasMore) {
+openScanner();
 } else {
-openScanner(locateType);
+consumer.onComplete();
 }
 });
 }

-private void openScanner(RegionLocateType locateType) {
+private void openScanner() {
 conn.callerFactory.<OpenScannerResponse> single().table(tableName).row(scan.getStartRow())
-.locateType(locateType).rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
+.locateType(getLocateType(scan)).rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
 .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).action(this::callOpenScanner).call()
 .whenComplete((resp, error) -> {
 if (error != null) {
@@ -145,7 +144,6 @@ class AsyncClientScanner {
 }

 public void start() {
-openScanner(scan.isReversed() && isEmptyStartRow(scan.getStartRow()) ? RegionLocateType.BEFORE
-: RegionLocateType.CURRENT);
+openScanner();
 }
 }
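The scanner's region-to-region loop now turns on a single Boolean future: true means "open a scanner on the next region", false means "deliver onComplete". A minimal, self-contained sketch of that control flow (ResumeLoop, scanOneRegion and the three-region cutoff are illustrative, not HBase APIs):

import java.util.concurrent.CompletableFuture;

public class ResumeLoop {
  // Illustrative stand-in for scanning one region; completes with true when
  // the scan should continue in the next region, false when it is finished.
  static CompletableFuture<Boolean> scanOneRegion(int region) {
    return CompletableFuture.completedFuture(region < 2);
  }

  // Mirrors the start().whenComplete((hasMore, error) -> ...) shape above.
  static void openScanner(int region) {
    scanOneRegion(region).whenComplete((hasMore, error) -> {
      if (error != null) {
        System.err.println("scan failed: " + error);
        return;
      }
      if (hasMore) {
        openScanner(region + 1); // resume in the next region
      } else {
        System.out.println("scan complete");
      }
    });
  }

  public static void main(String[] args) {
    openScanner(0);
  }
}

Folding the locate decision into getLocateType(scan) is what lets openScanner() drop its parameter: the start row and its inclusiveness now carry all the information the locator needs.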

AsyncRpcRetryingCallerFactory.java

@@ -246,7 +246,7 @@ class AsyncRpcRetryingCallerFactory {
 /**
 * Short cut for {@code build().start()}.
 */
-public CompletableFuture<RegionLocateType> start() {
+public CompletableFuture<Boolean> start() {
 return build().start();
 }
 }

AsyncScanSingleRegionRpcRetryingCaller.java

@@ -17,11 +17,9 @@
 */
 package org.apache.hadoop.hbase.client;

-import static org.apache.hadoop.hbase.client.ConnectionUtils.createClosestRowAfter;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.createClosestRowBefore;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.getPauseTime;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStartRow;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.noMoreResultsForReverseScan;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.noMoreResultsForScan;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.resetController;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.translateException;
@@ -34,7 +32,6 @@ import java.util.List;
 import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.TimeUnit;
-import java.util.function.Supplier;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -53,7 +50,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientServ
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.Interface;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

 /**
@@ -91,11 +87,9 @@ class AsyncScanSingleRegionRpcRetryingCaller {
 private final int startLogErrorsCnt;

-private final Supplier<byte[]> createNextStartRowWhenError;
 private final Runnable completeWhenNoMoreResultsInRegion;

-private final CompletableFuture<RegionLocateType> future;
+private final CompletableFuture<Boolean> future;

 private final HBaseRpcController controller;
@@ -128,10 +122,8 @@ class AsyncScanSingleRegionRpcRetryingCaller {
 this.rpcTimeoutNs = rpcTimeoutNs;
 this.startLogErrorsCnt = startLogErrorsCnt;
 if (scan.isReversed()) {
-createNextStartRowWhenError = this::createReversedNextStartRowWhenError;
 completeWhenNoMoreResultsInRegion = this::completeReversedWhenNoMoreResultsInRegion;
 } else {
-createNextStartRowWhenError = this::createNextStartRowWhenError;
 completeWhenNoMoreResultsInRegion = this::completeWhenNoMoreResultsInRegion;
 }
 this.future = new CompletableFuture<>();
@@ -164,23 +156,13 @@ class AsyncScanSingleRegionRpcRetryingCaller {
 future.completeExceptionally(new RetriesExhaustedException(tries - 1, exceptions));
 }

-@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NONNULL_PARAM_VIOLATION",
-justification = "https://github.com/findbugsproject/findbugs/issues/79")
 private void completeNoMoreResults() {
-future.complete(null);
+future.complete(false);
 }

-private void completeWithNextStartRow(byte[] nextStartRow) {
-scan.setStartRow(nextStartRow);
-future.complete(scan.isReversed() ? RegionLocateType.BEFORE : RegionLocateType.CURRENT);
-}
-
-private byte[] createNextStartRowWhenError() {
-return createClosestRowAfter(nextStartRowWhenError);
-}
-
-private byte[] createReversedNextStartRowWhenError() {
-return createClosestRowBefore(nextStartRowWhenError);
+private void completeWithNextStartRow(byte[] row, boolean inclusive) {
+scan.withStartRow(row, inclusive);
+future.complete(true);
 }

 private void completeWhenError(boolean closeScanner) {
@@ -189,12 +171,9 @@ class AsyncScanSingleRegionRpcRetryingCaller {
 closeScanner();
 }
 if (nextStartRowWhenError != null) {
-scan.setStartRow(
-includeNextStartRowWhenError ? nextStartRowWhenError : createNextStartRowWhenError.get());
+scan.withStartRow(nextStartRowWhenError, includeNextStartRowWhenError);
 }
-future.complete(
-scan.isReversed() && Bytes.equals(scan.getStartRow(), loc.getRegionInfo().getEndKey())
-? RegionLocateType.BEFORE : RegionLocateType.CURRENT);
+future.complete(true);
 }

 private void onError(Throwable error) {
@@ -251,30 +230,20 @@ class AsyncScanSingleRegionRpcRetryingCaller {
 }

 private void completeWhenNoMoreResultsInRegion() {
-if (isEmptyStopRow(scan.getStopRow())) {
-if (isEmptyStopRow(loc.getRegionInfo().getEndKey())) {
+if (noMoreResultsForScan(scan, loc.getRegionInfo())) {
 completeNoMoreResults();
-}
 } else {
-if (Bytes.compareTo(loc.getRegionInfo().getEndKey(), scan.getStopRow()) >= 0) {
-completeNoMoreResults();
-}
+completeWithNextStartRow(loc.getRegionInfo().getEndKey(), true);
 }
-completeWithNextStartRow(loc.getRegionInfo().getEndKey());
 }

 private void completeReversedWhenNoMoreResultsInRegion() {
-if (isEmptyStopRow(scan.getStopRow())) {
-if (isEmptyStartRow(loc.getRegionInfo().getStartKey())) {
+if (noMoreResultsForReverseScan(scan, loc.getRegionInfo())) {
 completeNoMoreResults();
-}
 } else {
-if (Bytes.compareTo(loc.getRegionInfo().getStartKey(), scan.getStopRow()) <= 0) {
-completeNoMoreResults();
-}
+completeWithNextStartRow(loc.getRegionInfo().getStartKey(), false);
 }
-completeWithNextStartRow(loc.getRegionInfo().getStartKey());
 }

 private void onComplete(ScanResponse resp) {
 if (controller.failed()) {
@@ -343,9 +312,9 @@ class AsyncScanSingleRegionRpcRetryingCaller {
 }

 /**
-* @return return locate direction for next open scanner call, or null if we should stop.
+* @return {@code true} if we should continue, otherwise {@code false}.
 */
-public CompletableFuture<RegionLocateType> start() {
+public CompletableFuture<Boolean> start() {
 next();
 return future;
 }
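The (row, inclusive) pair handed to completeWithNextStartRow is what makes the cross-region hand-off exact: a forward scan resumes at the finished region's endKey inclusively (that key is the first row of the next region), while a reverse scan resumes at the region's startKey exclusively (that row was already served by the finished region). A hedged sketch of just that rule, where Resume is a hypothetical holder type, not an HBase class (records need Java 16+):

public class ResumePoint {
  // Hypothetical holder, not an HBase type.
  record Resume(byte[] row, boolean inclusive) {}

  static Resume next(boolean reversed, byte[] regionStartKey, byte[] regionEndKey) {
    // Forward scan: the region's endKey is the first row of the next region,
    // so hand off inclusively. Reverse scan: the region's startKey was already
    // served by the region just finished, so hand off exclusively.
    return reversed ? new Resume(regionStartKey, false) : new Resume(regionEndKey, true);
  }
}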

AsyncSmallScanRpcRetryingCaller.java

@@ -17,8 +17,9 @@
 */
 package org.apache.hadoop.hbase.client;

-import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStartRow;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.getLocateType;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.noMoreResultsForReverseScan;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.noMoreResultsForScan;

 import java.io.IOException;
 import java.util.ArrayList;
@@ -37,7 +38,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
-import org.apache.hadoop.hbase.util.Bytes;

 /**
 * Retry caller for smaller scan.
@@ -57,10 +57,6 @@ class AsyncSmallScanRpcRetryingCaller {
 private final long rpcTimeoutNs;

-private final Function<byte[], byte[]> createClosestNextRow;
-
-private final Runnable firstScan;
-
 private final Function<HRegionInfo, Boolean> nextScan;

 private final List<Result> resultList;
@@ -76,12 +72,8 @@ class AsyncSmallScanRpcRetryingCaller {
 this.scanTimeoutNs = scanTimeoutNs;
 this.rpcTimeoutNs = rpcTimeoutNs;
 if (scan.isReversed()) {
-this.createClosestNextRow = ConnectionUtils::createClosestRowBefore;
-this.firstScan = this::reversedFirstScan;
 this.nextScan = this::reversedNextScan;
 } else {
-this.createClosestNextRow = ConnectionUtils::createClosestRowAfter;
-this.firstScan = this::firstScan;
 this.nextScan = this::nextScan;
 }
 this.resultList = new ArrayList<>();
@@ -141,10 +133,9 @@ class AsyncSmallScanRpcRetryingCaller {
 }
 if (resp.hasMoreResultsInRegion) {
 if (resp.results.length > 0) {
-scan.setStartRow(
-createClosestNextRow.apply(resp.results[resp.results.length - 1].getRow()));
+scan.withStartRow(resp.results[resp.results.length - 1].getRow(), false);
 }
-scan(RegionLocateType.CURRENT);
+scan();
 return;
 }
 if (!nextScan.apply(resp.currentRegion)) {
@@ -152,11 +143,11 @@ class AsyncSmallScanRpcRetryingCaller {
 }
 }

-private void scan(RegionLocateType locateType) {
+private void scan() {
 conn.callerFactory.<SmallScanResponse> single().table(tableName).row(scan.getStartRow())
-.rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
-.operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).locateType(locateType)
-.action(this::scan).call().whenComplete((resp, error) -> {
+.locateType(getLocateType(scan)).rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
+.operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).action(this::scan).call()
+.whenComplete((resp, error) -> {
 if (error != null) {
 future.completeExceptionally(error);
 } else {
@@ -166,45 +157,27 @@ class AsyncSmallScanRpcRetryingCaller {
 }

 public CompletableFuture<List<Result>> call() {
-firstScan.run();
+scan();
 return future;
 }

-private void firstScan() {
-scan(RegionLocateType.CURRENT);
-}
-
-private void reversedFirstScan() {
-scan(isEmptyStartRow(scan.getStartRow()) ? RegionLocateType.BEFORE : RegionLocateType.CURRENT);
-}
-
-private boolean nextScan(HRegionInfo region) {
-if (isEmptyStopRow(scan.getStopRow())) {
-if (isEmptyStopRow(region.getEndKey())) {
+private boolean nextScan(HRegionInfo info) {
+if (noMoreResultsForScan(scan, info)) {
 return false;
-}
 } else {
-if (Bytes.compareTo(region.getEndKey(), scan.getStopRow()) >= 0) {
-return false;
-}
-}
-scan.setStartRow(region.getEndKey());
-scan(RegionLocateType.CURRENT);
+scan.withStartRow(info.getEndKey());
+scan();
 return true;
 }
+}

-private boolean reversedNextScan(HRegionInfo region) {
-if (isEmptyStopRow(scan.getStopRow())) {
-if (isEmptyStartRow(region.getStartKey())) {
+private boolean reversedNextScan(HRegionInfo info) {
+if (noMoreResultsForReverseScan(scan, info)) {
 return false;
-}
 } else {
-if (Bytes.compareTo(region.getStartKey(), scan.getStopRow()) <= 0) {
-return false;
-}
-}
-scan.setStartRow(region.getStartKey());
-scan(RegionLocateType.BEFORE);
+scan.withStartRow(info.getStartKey(), false);
+scan();
 return true;
 }
+}
 }
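Note what disappeared above: the createClosestRowBefore/createClosestRowAfter plumbing. The small-scan caller used to fake an exclusive restart by manufacturing a neighboring key (createClosestRowAfter appends a 0x00 byte); with inclusiveness a first-class Scan attribute it can state the intent directly. A sketch of the contrast, under the Scan API introduced in this commit:

import java.util.Arrays;
import org.apache.hadoop.hbase.client.Scan;

public class ExclusiveResume {
  static void resumeAfter(Scan scan, byte[] lastRow) {
    // Pre-commit workaround: fabricate the "closest row after" lastRow by
    // appending a 0x00 byte (what createClosestRowAfter does), then use the
    // inclusive setter.
    scan.setStartRow(Arrays.copyOf(lastRow, lastRow.length + 1));

    // Post-commit: state the exclusiveness directly.
    scan.withStartRow(lastRow, false);
  }
}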

AsyncTableResultScanner.java

@@ -26,7 +26,6 @@ import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayDeque;
 import java.util.Queue;
-import java.util.function.Function;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -65,14 +64,10 @@ class AsyncTableResultScanner implements ResultScanner, RawScanResultConsumer {
 // used to filter out cells that already returned when we restart a scan
 private Cell lastCell;

-private Function<byte[], byte[]> createClosestRow;
-
 public AsyncTableResultScanner(RawAsyncTable table, Scan scan, long maxCacheSize) {
 this.rawTable = table;
 this.scan = scan;
 this.maxCacheSize = maxCacheSize;
-this.createClosestRow = scan.isReversed() ? ConnectionUtils::createClosestRowBefore
-: ConnectionUtils::createClosestRowAfter;
 table.scan(scan, this);
 }
@@ -84,15 +79,16 @@ class AsyncTableResultScanner implements ResultScanner, RawScanResultConsumer {
 private void stopPrefetch(Result lastResult) {
 prefetchStopped = true;
 if (lastResult.isPartial() || scan.getBatch() > 0) {
-scan.setStartRow(lastResult.getRow());
+scan.withStartRow(lastResult.getRow());
 lastCell = lastResult.rawCells()[lastResult.rawCells().length - 1];
 } else {
-scan.setStartRow(createClosestRow.apply(lastResult.getRow()));
+scan.withStartRow(lastResult.getRow(), false);
 }
 if (LOG.isDebugEnabled()) {
-LOG.debug(System.identityHashCode(this) + " stop prefetching when scanning "
+LOG.debug(
+String.format("0x%x", System.identityHashCode(this)) + " stop prefetching when scanning "
 + rawTable.getName() + " as the cache size " + cacheSize
-+ " is greater than the maxCacheSize + " + maxCacheSize + ", the next start row is "
++ " is greater than the maxCacheSize " + maxCacheSize + ", the next start row is "
 + Bytes.toStringBinary(scan.getStartRow()) + ", lastCell is " + lastCell);
 }
 // Ignore an onComplete call as the scan is stopped by us.
@@ -166,7 +162,7 @@ class AsyncTableResultScanner implements ResultScanner, RawScanResultConsumer {
 private void resumePrefetch() {
 if (LOG.isDebugEnabled()) {
-LOG.debug(System.identityHashCode(this) + " resume prefetching");
+LOG.debug(String.format("0x%x", System.identityHashCode(this)) + " resume prefetching");
 }
 prefetchStopped = false;
 rawTable.scan(scan, this);
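The resume rule above is subtle: a partial Result (or any batched scan) may have delivered only some of a row's cells, so the scan must restart at the same row inclusively and rely on lastCell to filter out what was already returned; only a row known to be complete may be skipped with an exclusive start. Restated as a stand-alone sketch (not the HBase implementation itself):

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;

public class PrefetchResume {
  static void rememberResumePoint(Scan scan, Result lastResult) {
    if (lastResult.isPartial() || scan.getBatch() > 0) {
      // Some cells of this row may still be outstanding: re-read the row
      // inclusively and let lastCell filter what was already delivered.
      scan.withStartRow(lastResult.getRow());
    } else {
      // The row was delivered whole: resume strictly after it.
      scan.withStartRow(lastResult.getRow(), false);
    }
  }
}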

ConnectionUtils.java

@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -248,7 +249,10 @@ public final class ConnectionUtils {
 /**
 * Create the closest row before the specified row
+* @deprecated in fact, we do not know the closest row before the given row, the result is only a
+* row very close to the current row. Avoid using this method in the future.
 */
+@Deprecated
 static byte[] createClosestRowBefore(byte[] row) {
 if (row.length == 0) {
 return MAX_BYTE_ARRAY;
@@ -347,4 +351,42 @@ public final class ConnectionUtils {
 static CompletableFuture<Void> voidBatchAll(AsyncTableBase table, List<? extends Row> actions) {
 return table.<Object> batchAll(actions).thenApply(r -> null);
 }
+
+static RegionLocateType getLocateType(Scan scan) {
+if (scan.isReversed()) {
+if (isEmptyStartRow(scan.getStartRow())) {
+return RegionLocateType.BEFORE;
+} else {
+return scan.includeStartRow() ? RegionLocateType.CURRENT : RegionLocateType.BEFORE;
+}
+} else {
+return scan.includeStartRow() ? RegionLocateType.CURRENT : RegionLocateType.AFTER;
+}
+}
+
+static boolean noMoreResultsForScan(Scan scan, HRegionInfo info) {
+if (isEmptyStopRow(info.getEndKey())) {
+return true;
+}
+if (isEmptyStopRow(scan.getStopRow())) {
+return false;
+}
+int c = Bytes.compareTo(info.getEndKey(), scan.getStopRow());
+// 1. if our stop row is less than the endKey of the region
+// 2. if our stop row is equal to the endKey of the region and we do not include the stop row
+// for scan.
+return c > 0 || (c == 0 && !scan.includeStopRow());
+}
+
+static boolean noMoreResultsForReverseScan(Scan scan, HRegionInfo info) {
+if (isEmptyStartRow(info.getStartKey())) {
+return true;
+}
+if (isEmptyStopRow(scan.getStopRow())) {
+return false;
+}
+// no need to test the inclusive of the stop row as the start key of a region is included in
+// the region.
+return Bytes.compareTo(info.getStartKey(), scan.getStopRow()) <= 0;
+}
 }
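The c == 0 branch of noMoreResultsForScan is the heart of inclusive-stop support: a region's endKey is exclusive for that region, so when endKey equals the stop row, the stop row itself lives in the next region and an inclusive scan must keep going. A self-contained restatement with plain byte arrays in place of HRegionInfo (Arrays.compareUnsigned matches Bytes.compareTo's unsigned lexicographic order; Java 9+):

import java.util.Arrays;

public class StopRowCheck {
  // Standalone restatement of the noMoreResultsForScan predicate above.
  static boolean noMoreResults(byte[] regionEndKey, byte[] stopRow, boolean includeStopRow) {
    if (regionEndKey.length == 0) {
      return true; // last region of the table
    }
    if (stopRow.length == 0) {
      return false; // scan is unbounded
    }
    int c = Arrays.compareUnsigned(regionEndKey, stopRow);
    return c > 0 || (c == 0 && !includeStopRow);
  }

  public static void main(String[] args) {
    byte[] d = {'d'};
    System.out.println(noMoreResults(d, d, false)); // true: exclusive stop "d" is done
    System.out.println(noMoreResults(d, d, true));  // false: row "d" lives in the next region
  }
}

The reverse-scan variant needs no inclusiveness test at all, because a region's startKey is included in the region: reaching a region whose startKey is at or below the stop row always finishes the scan.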

Scan.java

@@ -87,7 +87,9 @@ public class Scan extends Query {
 private static final String RAW_ATTR = "_raw_";

 private byte [] startRow = HConstants.EMPTY_START_ROW;
+private boolean includeStartRow = true;
 private byte [] stopRow = HConstants.EMPTY_END_ROW;
+private boolean includeStopRow = false;
 private int maxVersions = 1;
 private int batch = -1;
@@ -106,7 +108,6 @@ public class Scan extends Query {
 private int storeLimit = -1;
 private int storeOffset = 0;
-private boolean getScan;

 /**
 * @deprecated since 1.0.0. Use {@link #setScanMetricsEnabled(boolean)}
@@ -135,8 +136,8 @@ public class Scan extends Query {
 private long maxResultSize = -1;
 private boolean cacheBlocks = true;
 private boolean reversed = false;
-private Map<byte [], NavigableSet<byte []>> familyMap =
-new TreeMap<byte [], NavigableSet<byte []>>(Bytes.BYTES_COMPARATOR);
+private Map<byte[], NavigableSet<byte[]>> familyMap =
+new TreeMap<byte[], NavigableSet<byte[]>>(Bytes.BYTES_COMPARATOR);
 private Boolean asyncPrefetch = null;

 /**
@@ -175,7 +176,11 @@ public class Scan extends Query {
 */
 public Scan() {}

-public Scan(byte [] startRow, Filter filter) {
+/**
+* @deprecated use {@code new Scan().withStartRow(startRow).setFilter(filter)} instead.
+*/
+@Deprecated
+public Scan(byte[] startRow, Filter filter) {
 this(startRow);
 this.filter = filter;
 }
@@ -183,24 +188,26 @@ public class Scan extends Query {
 /**
 * Create a Scan operation starting at the specified row.
 * <p>
-* If the specified row does not exist, the Scanner will start from the
-* next closest row after the specified row.
+* If the specified row does not exist, the Scanner will start from the next closest row after the
+* specified row.
 * @param startRow row to start scanner at or after
+* @deprecated use {@code new Scan().withStartRow(startRow)} instead.
 */
-public Scan(byte [] startRow) {
-this.startRow = startRow;
+@Deprecated
+public Scan(byte[] startRow) {
+setStartRow(startRow);
 }

 /**
 * Create a Scan operation for the range of rows specified.
 * @param startRow row to start scanner at or after (inclusive)
 * @param stopRow row to stop scanner before (exclusive)
+* @deprecated use {@code new Scan().withStartRow(startRow).withStopRow(stopRow)} instead.
 */
-public Scan(byte [] startRow, byte [] stopRow) {
-this.startRow = startRow;
-this.stopRow = stopRow;
-//if the startRow and stopRow both are empty, it is not a Get
-this.getScan = isStartRowAndEqualsStopRow();
+@Deprecated
+public Scan(byte[] startRow, byte[] stopRow) {
+setStartRow(startRow);
+setStopRow(stopRow);
 }

 /**
@@ -211,7 +218,9 @@ public class Scan extends Query {
 */
 public Scan(Scan scan) throws IOException {
 startRow = scan.getStartRow();
+includeStartRow = scan.includeStartRow();
 stopRow = scan.getStopRow();
+includeStopRow = scan.includeStopRow();
 maxVersions = scan.getMaxVersions();
 batch = scan.getBatch();
 storeLimit = scan.getMaxResultsPerColumnFamily();
@@ -219,7 +228,6 @@ public class Scan extends Query {
 caching = scan.getCaching();
 maxResultSize = scan.getMaxResultSize();
 cacheBlocks = scan.getCacheBlocks();
-getScan = scan.isGetScan();
 filter = scan.getFilter(); // clone?
 loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
 consistency = scan.getConsistency();
@@ -228,8 +236,7 @@ public class Scan extends Query {
 asyncPrefetch = scan.isAsyncPrefetch();
 small = scan.isSmall();
 allowPartialResults = scan.getAllowPartialResults();
-TimeRange ctr = scan.getTimeRange();
-tr = new TimeRange(ctr.getMin(), ctr.getMax());
+tr = scan.getTimeRange(); // TimeRange is immutable
 Map<byte[], NavigableSet<byte[]>> fams = scan.getFamilyMap();
 for (Map.Entry<byte[],NavigableSet<byte[]>> entry : fams.entrySet()) {
 byte [] fam = entry.getKey();
@@ -258,7 +265,9 @@ public class Scan extends Query {
 */
 public Scan(Get get) {
 this.startRow = get.getRow();
+this.includeStartRow = true;
 this.stopRow = get.getRow();
+this.includeStopRow = true;
 this.filter = get.getFilter();
 this.cacheBlocks = get.getCacheBlocks();
 this.maxVersions = get.getMaxVersions();
@@ -266,7 +275,6 @@ public class Scan extends Query {
 this.storeOffset = get.getRowOffsetPerColumnFamily();
 this.tr = get.getTimeRange();
 this.familyMap = get.getFamilyMap();
-this.getScan = true;
 this.asyncPrefetch = false;
 this.consistency = get.getConsistency();
 this.setIsolationLevel(get.getIsolationLevel());
@@ -282,13 +290,13 @@ public class Scan extends Query {
 }

 public boolean isGetScan() {
-return this.getScan || isStartRowAndEqualsStopRow();
+return includeStartRow && includeStopRow && areStartRowAndStopRowEqual(startRow, stopRow);
 }

-private boolean isStartRowAndEqualsStopRow() {
-return this.startRow != null && this.startRow.length > 0 &&
-Bytes.equals(this.startRow, this.stopRow);
+private static boolean areStartRowAndStopRowEqual(byte[] startRow, byte[] stopRow) {
+return startRow != null && startRow.length > 0 && Bytes.equals(startRow, stopRow);
 }

 /**
 * Get all columns from the specified family.
 * <p>
@@ -383,45 +391,120 @@ public class Scan extends Query {
 /**
 * Set the start row of the scan.
 * <p>
-* If the specified row does not exist, the Scanner will start from the
-* next closest row after the specified row.
+* If the specified row does not exist, the Scanner will start from the next closest row after the
+* specified row.
 * @param startRow row to start scanner at or after
 * @return this
-* @throws IllegalArgumentException if startRow does not meet criteria
-* for a row key (when length exceeds {@link HConstants#MAX_ROW_LENGTH})
+* @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
+* exceeds {@link HConstants#MAX_ROW_LENGTH})
+* @deprecated use {@link #withStartRow(byte[])} instead. This method may change the inclusive of
+* the stop row to keep compatible with the old behavior.
 */
-public Scan setStartRow(byte [] startRow) {
+@Deprecated
+public Scan setStartRow(byte[] startRow) {
+withStartRow(startRow);
+if (areStartRowAndStopRowEqual(startRow, stopRow)) {
+// for keeping the old behavior that a scan with the same start and stop row is a get scan.
+this.includeStopRow = true;
+}
+return this;
+}
+
+/**
+* Set the start row of the scan.
+* <p>
+* If the specified row does not exist, the Scanner will start from the next closest row after the
+* specified row.
+* @param startRow row to start scanner at or after
+* @return this
+* @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
+* exceeds {@link HConstants#MAX_ROW_LENGTH})
+*/
+public Scan withStartRow(byte[] startRow) {
+return withStartRow(startRow, true);
+}
+
+/**
+* Set the start row of the scan.
+* <p>
+* If the specified row does not exist, or the {@code inclusive} is {@code false}, the Scanner
+* will start from the next closest row after the specified row.
+* @param startRow row to start scanner at or after
+* @param inclusive whether we should include the start row when scan
+* @return this
+* @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
+* exceeds {@link HConstants#MAX_ROW_LENGTH})
+*/
+public Scan withStartRow(byte[] startRow, boolean inclusive) {
 if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) {
-throw new IllegalArgumentException(
-"startRow's length must be less than or equal to " +
-HConstants.MAX_ROW_LENGTH + " to meet the criteria" +
-" for a row key.");
+throw new IllegalArgumentException("startRow's length must be less than or equal to "
++ HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
 }
 this.startRow = startRow;
+this.includeStartRow = inclusive;
 return this;
 }

 /**
 * Set the stop row of the scan.
-* @param stopRow row to end at (exclusive)
 * <p>
-* The scan will include rows that are lexicographically less than
-* the provided stopRow.
-* <p><b>Note:</b> When doing a filter for a rowKey <u>Prefix</u>
-* use {@link #setRowPrefixFilter(byte[])}.
-* The 'trailing 0' will not yield the desired result.</p>
+* The scan will include rows that are lexicographically less than the provided stopRow.
+* <p>
+* <b>Note:</b> When doing a filter for a rowKey <u>Prefix</u> use
+* {@link #setRowPrefixFilter(byte[])}. The 'trailing 0' will not yield the desired result.
+* </p>
+* @param stopRow row to end at (exclusive)
 * @return this
-* @throws IllegalArgumentException if stopRow does not meet criteria
-* for a row key (when length exceeds {@link HConstants#MAX_ROW_LENGTH})
+* @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
+* exceeds {@link HConstants#MAX_ROW_LENGTH})
+* @deprecated use {@link #withStartRow(byte[])} instead. This method may change the inclusive of
+* the stop row to keep compatible with the old behavior.
 */
-public Scan setStopRow(byte [] stopRow) {
+@Deprecated
+public Scan setStopRow(byte[] stopRow) {
+withStopRow(stopRow);
+if (areStartRowAndStopRowEqual(startRow, stopRow)) {
+// for keeping the old behavior that a scan with the same start and stop row is a get scan.
+this.includeStopRow = true;
+}
+return this;
+}
+
+/**
+* Set the stop row of the scan.
+* <p>
+* The scan will include rows that are lexicographically less than the provided stopRow.
+* <p>
+* <b>Note:</b> When doing a filter for a rowKey <u>Prefix</u> use
+* {@link #setRowPrefixFilter(byte[])}. The 'trailing 0' will not yield the desired result.
+* </p>
+* @param stopRow row to end at (exclusive)
+* @return this
+* @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
+* exceeds {@link HConstants#MAX_ROW_LENGTH})
+*/
+public Scan withStopRow(byte[] stopRow) {
+return withStopRow(stopRow, false);
+}
+
+/**
+* Set the stop row of the scan.
+* <p>
+* The scan will include rows that are lexicographically less than (or equal to if
+* {@code inclusive} is {@code true}) the provided stopRow.
+* @param stopRow row to end at
+* @param inclusive whether we should include the stop row when scan
+* @return this
+* @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
+* exceeds {@link HConstants#MAX_ROW_LENGTH})
+*/
+public Scan withStopRow(byte[] stopRow, boolean inclusive) {
 if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
-throw new IllegalArgumentException(
-"stopRow's length must be less than or equal to " +
-HConstants.MAX_ROW_LENGTH + " to meet the criteria" +
-" for a row key.");
+throw new IllegalArgumentException("stopRow's length must be less than or equal to "
++ HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
 }
 this.stopRow = stopRow;
+this.includeStopRow = inclusive;
 return this;
 }
@@ -635,13 +718,27 @@ public class Scan extends Query {
 return this.startRow;
 }

+/**
+* @return if we should include start row when scan
+*/
+public boolean includeStartRow() {
+return includeStartRow;
+}
+
 /**
 * @return the stoprow
 */
-public byte [] getStopRow() {
+public byte[] getStopRow() {
 return this.stopRow;
 }

+/**
+* @return if we should include stop row when scan
+*/
+public boolean includeStopRow() {
+return includeStopRow;
+}
+
 /**
 * @return the max number of versions to fetch
 */
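With withStartRow/withStopRow taking an explicit inclusive flag, every interval shape is expressible without key surgery, while the deprecated setters keep the old equal-start-and-stop "get scan" behavior. A usage sketch against the API added above:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanRanges {
  public static void main(String[] args) {
    // [row1, row9): the old default semantics, now spelled out explicitly.
    Scan halfOpen = new Scan().withStartRow(Bytes.toBytes("row1"))
        .withStopRow(Bytes.toBytes("row9"));

    // (row1, row9]: exclusive start, inclusive stop; before this commit such
    // a range required manufacturing neighboring row keys.
    Scan openClosed = new Scan().withStartRow(Bytes.toBytes("row1"), false)
        .withStopRow(Bytes.toBytes("row9"), true);

    // Deprecated-path quirk kept for compatibility: equal start and stop rows
    // set via setStartRow/setStopRow still behave as a single-row "get scan".
    Scan getScan = new Scan().setStartRow(Bytes.toBytes("row1"))
        .setStopRow(Bytes.toBytes("row1"));
    System.out.println(getScan.isGetScan());                         // true
    System.out.println(halfOpen.isGetScan() || openClosed.isGetScan()); // false
  }
}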

ProtobufUtil.java

@@ -911,6 +911,12 @@ public final class ProtobufUtil {
 if (mvccReadPoint > 0) {
 scanBuilder.setMvccReadPoint(mvccReadPoint);
 }
+if (!scan.includeStartRow()) {
+scanBuilder.setIncludeStartRow(false);
+}
+if (scan.includeStopRow()) {
+scanBuilder.setIncludeStopRow(true);
+}
 return scanBuilder.build();
 }
@@ -923,15 +929,24 @@ public final class ProtobufUtil {
 */
 public static Scan toScan(
 final ClientProtos.Scan proto) throws IOException {
-byte [] startRow = HConstants.EMPTY_START_ROW;
-byte [] stopRow = HConstants.EMPTY_END_ROW;
+byte[] startRow = HConstants.EMPTY_START_ROW;
+byte[] stopRow = HConstants.EMPTY_END_ROW;
+boolean includeStartRow = true;
+boolean includeStopRow = false;
 if (proto.hasStartRow()) {
 startRow = proto.getStartRow().toByteArray();
 }
 if (proto.hasStopRow()) {
 stopRow = proto.getStopRow().toByteArray();
 }
-Scan scan = new Scan(startRow, stopRow);
+if (proto.hasIncludeStartRow()) {
+includeStartRow = proto.getIncludeStartRow();
+}
+if (proto.hasIncludeStopRow()) {
+includeStopRow = proto.getIncludeStopRow();
+}
+Scan scan =
+new Scan().withStartRow(startRow, includeStartRow).withStopRow(stopRow, includeStopRow);
 if (proto.hasCacheBlocks()) {
 scan.setCacheBlocks(proto.getCacheBlocks());
 }

ProtobufUtil.java

@@ -1024,6 +1024,12 @@ public final class ProtobufUtil {
 if (mvccReadPoint > 0) {
 scanBuilder.setMvccReadPoint(mvccReadPoint);
 }
+if (!scan.includeStartRow()) {
+scanBuilder.setIncludeStartRow(false);
+}
+if (scan.includeStopRow()) {
+scanBuilder.setIncludeStopRow(true);
+}
 return scanBuilder.build();
 }
@@ -1036,15 +1042,24 @@ public final class ProtobufUtil {
 */
 public static Scan toScan(
 final ClientProtos.Scan proto) throws IOException {
-byte [] startRow = HConstants.EMPTY_START_ROW;
-byte [] stopRow = HConstants.EMPTY_END_ROW;
+byte[] startRow = HConstants.EMPTY_START_ROW;
+byte[] stopRow = HConstants.EMPTY_END_ROW;
+boolean includeStartRow = true;
+boolean includeStopRow = false;
 if (proto.hasStartRow()) {
 startRow = proto.getStartRow().toByteArray();
 }
 if (proto.hasStopRow()) {
 stopRow = proto.getStopRow().toByteArray();
 }
-Scan scan = new Scan(startRow, stopRow);
+if (proto.hasIncludeStartRow()) {
+includeStartRow = proto.getIncludeStartRow();
+}
+if (proto.hasIncludeStopRow()) {
+includeStopRow = proto.getIncludeStopRow();
+}
+Scan scan =
+new Scan().withStartRow(startRow, includeStartRow).withStopRow(stopRow, includeStopRow);
 if (proto.hasCacheBlocks()) {
 scan.setCacheBlocks(proto.getCacheBlocks());
 }
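Wire compatibility falls out of the protobuf defaults: include_start_row only hits the wire when false and include_stop_row only when true, so a Scan from an old client parses to exactly the old semantics. A round-trip sketch (assuming the shaded ProtobufUtil shown above is on the classpath):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.util.Bytes;

public class RoundTrip {
  public static void main(String[] args) throws Exception {
    Scan scan = new Scan().withStartRow(Bytes.toBytes("a"), false)
        .withStopRow(Bytes.toBytes("z"), true);

    // Non-default flags are written explicitly; default flags stay off the
    // wire entirely.
    ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
    Scan back = ProtobufUtil.toScan(proto);

    System.out.println(back.includeStartRow()); // false
    System.out.println(back.includeStopRow());  // true
  }
}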

TimeRange.java

@@ -18,8 +18,6 @@
 package org.apache.hadoop.hbase.io;

-import java.io.IOException;
-
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.util.Bytes;

ClientProtos.java

@@ -14563,6 +14563,24 @@ public final class ClientProtos {
 * <code>optional uint64 mvcc_read_point = 20 [default = 0];</code>
 */
 long getMvccReadPoint();
+
+/**
+* <code>optional bool include_start_row = 21 [default = true];</code>
+*/
+boolean hasIncludeStartRow();
+/**
+* <code>optional bool include_start_row = 21 [default = true];</code>
+*/
+boolean getIncludeStartRow();
+
+/**
+* <code>optional bool include_stop_row = 22 [default = false];</code>
+*/
+boolean hasIncludeStopRow();
+/**
+* <code>optional bool include_stop_row = 22 [default = false];</code>
+*/
+boolean getIncludeStopRow();
 }
 /**
 * <pre>
@@ -14604,6 +14622,8 @@ public final class ClientProtos {
 allowPartialResults_ = false;
 cfTimeRange_ = java.util.Collections.emptyList();
 mvccReadPoint_ = 0L;
+includeStartRow_ = true;
+includeStopRow_ = false;
 }

 @java.lang.Override
@@ -14768,6 +14788,16 @@ public final class ClientProtos {
 mvccReadPoint_ = input.readUInt64();
 break;
 }
+case 168: {
+bitField0_ |= 0x00020000;
+includeStartRow_ = input.readBool();
+break;
+}
+case 176: {
+bitField0_ |= 0x00040000;
+includeStopRow_ = input.readBool();
+break;
+}
 }
 }
 } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
@@ -15183,6 +15213,36 @@ public final class ClientProtos {
 return mvccReadPoint_;
 }

+public static final int INCLUDE_START_ROW_FIELD_NUMBER = 21;
+private boolean includeStartRow_;
+/**
+* <code>optional bool include_start_row = 21 [default = true];</code>
+*/
+public boolean hasIncludeStartRow() {
+return ((bitField0_ & 0x00020000) == 0x00020000);
+}
+/**
+* <code>optional bool include_start_row = 21 [default = true];</code>
+*/
+public boolean getIncludeStartRow() {
+return includeStartRow_;
+}
+
+public static final int INCLUDE_STOP_ROW_FIELD_NUMBER = 22;
+private boolean includeStopRow_;
+/**
+* <code>optional bool include_stop_row = 22 [default = false];</code>
+*/
+public boolean hasIncludeStopRow() {
+return ((bitField0_ & 0x00040000) == 0x00040000);
+}
+/**
+* <code>optional bool include_stop_row = 22 [default = false];</code>
+*/
+public boolean getIncludeStopRow() {
+return includeStopRow_;
+}
+
 private byte memoizedIsInitialized = -1;
 public final boolean isInitialized() {
 byte isInitialized = memoizedIsInitialized;
@@ -15279,6 +15339,12 @@ public final class ClientProtos {
 if (((bitField0_ & 0x00010000) == 0x00010000)) {
 output.writeUInt64(20, mvccReadPoint_);
 }
+if (((bitField0_ & 0x00020000) == 0x00020000)) {
+output.writeBool(21, includeStartRow_);
+}
+if (((bitField0_ & 0x00040000) == 0x00040000)) {
+output.writeBool(22, includeStopRow_);
+}
 unknownFields.writeTo(output);
 }
@@ -15367,6 +15433,14 @@ public final class ClientProtos {
 size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
 .computeUInt64Size(20, mvccReadPoint_);
 }
+if (((bitField0_ & 0x00020000) == 0x00020000)) {
+size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+.computeBoolSize(21, includeStartRow_);
+}
+if (((bitField0_ & 0x00040000) == 0x00040000)) {
+size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+.computeBoolSize(22, includeStopRow_);
+}
 size += unknownFields.getSerializedSize();
 memoizedSize = size;
 return size;
@@ -15474,6 +15548,16 @@ public final class ClientProtos {
 result = result && (getMvccReadPoint()
 == other.getMvccReadPoint());
 }
+result = result && (hasIncludeStartRow() == other.hasIncludeStartRow());
+if (hasIncludeStartRow()) {
+result = result && (getIncludeStartRow()
+== other.getIncludeStartRow());
+}
+result = result && (hasIncludeStopRow() == other.hasIncludeStopRow());
+if (hasIncludeStopRow()) {
+result = result && (getIncludeStopRow()
+== other.getIncludeStopRow());
+}
 result = result && unknownFields.equals(other.unknownFields);
 return result;
 }
@@ -15572,6 +15656,16 @@ public final class ClientProtos {
 hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
 getMvccReadPoint());
 }
+if (hasIncludeStartRow()) {
+hash = (37 * hash) + INCLUDE_START_ROW_FIELD_NUMBER;
+hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean(
+getIncludeStartRow());
+}
+if (hasIncludeStopRow()) {
+hash = (37 * hash) + INCLUDE_STOP_ROW_FIELD_NUMBER;
+hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean(
+getIncludeStopRow());
+}
 hash = (29 * hash) + unknownFields.hashCode();
 memoizedHashCode = hash;
 return hash;
@@ -15765,6 +15859,10 @@ public final class ClientProtos {
 }
 mvccReadPoint_ = 0L;
 bitField0_ = (bitField0_ & ~0x00080000);
+includeStartRow_ = true;
+bitField0_ = (bitField0_ & ~0x00100000);
+includeStopRow_ = false;
+bitField0_ = (bitField0_ & ~0x00200000);
 return this;
 }
@@ -15892,6 +15990,14 @@ public final class ClientProtos {
 to_bitField0_ |= 0x00010000;
 }
 result.mvccReadPoint_ = mvccReadPoint_;
+if (((from_bitField0_ & 0x00100000) == 0x00100000)) {
+to_bitField0_ |= 0x00020000;
+}
+result.includeStartRow_ = includeStartRow_;
+if (((from_bitField0_ & 0x00200000) == 0x00200000)) {
+to_bitField0_ |= 0x00040000;
+}
+result.includeStopRow_ = includeStopRow_;
 result.bitField0_ = to_bitField0_;
 onBuilt();
 return result;
@@ -16063,6 +16169,12 @@ public final class ClientProtos {
 if (other.hasMvccReadPoint()) {
 setMvccReadPoint(other.getMvccReadPoint());
 }
+if (other.hasIncludeStartRow()) {
+setIncludeStartRow(other.getIncludeStartRow());
+}
+if (other.hasIncludeStopRow()) {
+setIncludeStopRow(other.getIncludeStopRow());
+}
 this.mergeUnknownFields(other.unknownFields);
 onChanged();
 return this;
@@ -17572,6 +17684,70 @@ public final class ClientProtos {
 onChanged();
 return this;
 }
+
+private boolean includeStartRow_ = true;
+/**
+* <code>optional bool include_start_row = 21 [default = true];</code>
+*/
+public boolean hasIncludeStartRow() {
+return ((bitField0_ & 0x00100000) == 0x00100000);
+}
+/**
+* <code>optional bool include_start_row = 21 [default = true];</code>
+*/
+public boolean getIncludeStartRow() {
+return includeStartRow_;
+}
+/**
+* <code>optional bool include_start_row = 21 [default = true];</code>
+*/
+public Builder setIncludeStartRow(boolean value) {
+bitField0_ |= 0x00100000;
+includeStartRow_ = value;
+onChanged();
+return this;
+}
+/**
+* <code>optional bool include_start_row = 21 [default = true];</code>
+*/
+public Builder clearIncludeStartRow() {
+bitField0_ = (bitField0_ & ~0x00100000);
+includeStartRow_ = true;
+onChanged();
+return this;
+}
+
+private boolean includeStopRow_ ;
+/**
+* <code>optional bool include_stop_row = 22 [default = false];</code>
+*/
+public boolean hasIncludeStopRow() {
+return ((bitField0_ & 0x00200000) == 0x00200000);
+}
+/**
+* <code>optional bool include_stop_row = 22 [default = false];</code>
+*/
+public boolean getIncludeStopRow() {
+return includeStopRow_;
+}
+/**
+* <code>optional bool include_stop_row = 22 [default = false];</code>
+*/
+public Builder setIncludeStopRow(boolean value) {
+bitField0_ |= 0x00200000;
+includeStopRow_ = value;
+onChanged();
+return this;
+}
+/**
+* <code>optional bool include_stop_row = 22 [default = false];</code>
+*/
+public Builder clearIncludeStopRow() {
+bitField0_ = (bitField0_ & ~0x00200000);
+includeStopRow_ = false;
+onChanged();
+return this;
+}
+
 public final Builder setUnknownFields(
 final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
 return super.setUnknownFields(unknownFields);
@@ -40658,7 +40834,7 @@ public final class ClientProtos {
 "tion\030\003 \001(\0132\023.hbase.pb.Condition\022\023\n\013nonce" +
 "_group\030\004 \001(\004\"E\n\016MutateResponse\022 \n\006result" +
 "\030\001 \001(\0132\020.hbase.pb.Result\022\021\n\tprocessed\030\002 " +
-"\001(\010\"\331\004\n\004Scan\022 \n\006column\030\001 \003(\0132\020.hbase.pb." +
+"\001(\010\"\233\005\n\004Scan\022 \n\006column\030\001 \003(\0132\020.hbase.pb." +
 "Column\022*\n\tattribute\030\002 \003(\0132\027.hbase.pb.Nam" +
 "eBytesPair\022\021\n\tstart_row\030\003 \001(\014\022\020\n\010stop_ro" +
 "w\030\004 \001(\014\022 \n\006filter\030\005 \001(\0132\020.hbase.pb.Filte" +
@@ -40673,97 +40849,98 @@ public final class ClientProtos {
 "aching\030\021 \001(\r\022\035\n\025allow_partial_results\030\022 " +
 "\001(\010\0226\n\rcf_time_range\030\023 \003(\0132\037.hbase.pb.Co" +
 "lumnFamilyTimeRange\022\032\n\017mvcc_read_point\030\024",
-" \001(\004:\0010\"\246\002\n\013ScanRequest\022)\n\006region\030\001 \001(\0132" +
-"\031.hbase.pb.RegionSpecifier\022\034\n\004scan\030\002 \001(\013" +
-"2\016.hbase.pb.Scan\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016" +
-"number_of_rows\030\004 \001(\r\022\025\n\rclose_scanner\030\005 " +
-"\001(\010\022\025\n\rnext_call_seq\030\006 \001(\004\022\037\n\027client_han" +
-"dles_partials\030\007 \001(\010\022!\n\031client_handles_he" +
-"artbeats\030\010 \001(\010\022\032\n\022track_scan_metrics\030\t \001" +
-"(\010\022\024\n\005renew\030\n \001(\010:\005false\"\266\002\n\014ScanRespons" +
-"e\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n\nscanner_i" +
-"d\030\002 \001(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003ttl\030\004 \001",
-"(\r\022!\n\007results\030\005 \003(\0132\020.hbase.pb.Result\022\r\n" +
-"\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_result\030" +
-"\007 \003(\010\022\036\n\026more_results_in_region\030\010 \001(\010\022\031\n" +
-"\021heartbeat_message\030\t \001(\010\022+\n\014scan_metrics" +
-"\030\n \001(\0132\025.hbase.pb.ScanMetrics\022\032\n\017mvcc_re" +
-"ad_point\030\013 \001(\004:\0010\"\240\002\n\024BulkLoadHFileReque" +
-"st\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpec" +
-"ifier\022>\n\013family_path\030\002 \003(\0132).hbase.pb.Bu" +
-"lkLoadHFileRequest.FamilyPath\022\026\n\016assign_" +
-"seq_num\030\003 \001(\010\022+\n\010fs_token\030\004 \001(\0132\031.hbase.",
-"pb.DelegationToken\022\022\n\nbulk_token\030\005 \001(\t\022\030" +
-"\n\tcopy_file\030\006 \001(\010:\005false\032*\n\nFamilyPath\022\016" +
-"\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoad" +
-"HFileResponse\022\016\n\006loaded\030\001 \002(\010\"V\n\017Delegat" +
-"ionToken\022\022\n\nidentifier\030\001 \001(\014\022\020\n\010password" +
-"\030\002 \001(\014\022\014\n\004kind\030\003 \001(\t\022\017\n\007service\030\004 \001(\t\"l\n" +
-"\026PrepareBulkLoadRequest\022\'\n\ntable_name\030\001 " +
-"\002(\0132\023.hbase.pb.TableName\022)\n\006region\030\002 \001(\013" +
-"2\031.hbase.pb.RegionSpecifier\"-\n\027PrepareBu" +
-"lkLoadResponse\022\022\n\nbulk_token\030\001 \002(\t\"W\n\026Cl",
-"eanupBulkLoadRequest\022\022\n\nbulk_token\030\001 \002(\t" +
-"\022)\n\006region\030\002 \001(\0132\031.hbase.pb.RegionSpecif" +
-"ier\"\031\n\027CleanupBulkLoadResponse\"a\n\026Coproc" +
-"essorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014service" +
-"_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022\017\n\007requ" +
-"est\030\004 \002(\014\"B\n\030CoprocessorServiceResult\022&\n" +
-"\005value\030\001 \001(\0132\027.hbase.pb.NameBytesPair\"v\n" +
-"\031CoprocessorServiceRequest\022)\n\006region\030\001 \002" +
-"(\0132\031.hbase.pb.RegionSpecifier\022.\n\004call\030\002 " +
-"\002(\0132 .hbase.pb.CoprocessorServiceCall\"o\n",
-"\032CoprocessorServiceResponse\022)\n\006region\030\001 " +
-"\002(\0132\031.hbase.pb.RegionSpecifier\022&\n\005value\030" +
-"\002 \002(\0132\027.hbase.pb.NameBytesPair\"\226\001\n\006Actio" +
-"n\022\r\n\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001(\0132\027.hba" +
-"se.pb.MutationProto\022\032\n\003get\030\003 \001(\0132\r.hbase" +
-".pb.Get\0226\n\014service_call\030\004 \001(\0132 .hbase.pb" +
-".CoprocessorServiceCall\"k\n\014RegionAction\022" +
-")\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifi" +
-"er\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(\0132\020.hba" +
-"se.pb.Action\"c\n\017RegionLoadStats\022\027\n\014memst",
-"oreLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy\030\002 \001(\005:" +
-"\0010\022\035\n\022compactionPressure\030\003 \001(\005:\0010\"j\n\024Mul" +
-"tiRegionLoadStats\022)\n\006region\030\001 \003(\0132\031.hbas" +
-"e.pb.RegionSpecifier\022\'\n\004stat\030\002 \003(\0132\031.hba" +
-"se.pb.RegionLoadStats\"\336\001\n\021ResultOrExcept" +
-"ion\022\r\n\005index\030\001 \001(\r\022 \n\006result\030\002 \001(\0132\020.hba" +
-"se.pb.Result\022*\n\texception\030\003 \001(\0132\027.hbase." +
-"pb.NameBytesPair\022:\n\016service_result\030\004 \001(\013" +
-"2\".hbase.pb.CoprocessorServiceResult\0220\n\t" +
-"loadStats\030\005 \001(\0132\031.hbase.pb.RegionLoadSta",
-"tsB\002\030\001\"x\n\022RegionActionResult\0226\n\021resultOr" +
-"Exception\030\001 \003(\0132\033.hbase.pb.ResultOrExcep" +
-"tion\022*\n\texception\030\002 \001(\0132\027.hbase.pb.NameB" +
-"ytesPair\"x\n\014MultiRequest\022,\n\014regionAction" +
-"\030\001 \003(\0132\026.hbase.pb.RegionAction\022\022\n\nnonceG" +
-"roup\030\002 \001(\004\022&\n\tcondition\030\003 \001(\0132\023.hbase.pb" +
-".Condition\"\226\001\n\rMultiResponse\0228\n\022regionAc" +
-"tionResult\030\001 \003(\0132\034.hbase.pb.RegionAction" +
-"Result\022\021\n\tprocessed\030\002 \001(\010\0228\n\020regionStati" +
-"stics\030\003 \001(\0132\036.hbase.pb.MultiRegionLoadSt",
-"ats*\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010TIMELI" +
-"NE\020\0012\263\005\n\rClientService\0222\n\003Get\022\024.hbase.pb" +
-".GetRequest\032\025.hbase.pb.GetResponse\022;\n\006Mu" +
-"tate\022\027.hbase.pb.MutateRequest\032\030.hbase.pb" +
+" \001(\004:\0010\022\037\n\021include_start_row\030\025 \001(\010:\004true" +
+"\022\037\n\020include_stop_row\030\026 \001(\010:\005false\"\246\002\n\013Sc" +
+"anRequest\022)\n\006region\030\001 \001(\0132\031.hbase.pb.Reg" +
+"ionSpecifier\022\034\n\004scan\030\002 \001(\0132\016.hbase.pb.Sc" +
+"an\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016number_of_rows" +
+"\030\004 \001(\r\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n\rnext_ca" +
+"ll_seq\030\006 \001(\004\022\037\n\027client_handles_partials\030" +
+"\007 \001(\010\022!\n\031client_handles_heartbeats\030\010 \001(\010" +
+"\022\032\n\022track_scan_metrics\030\t \001(\010\022\024\n\005renew\030\n " +
+"\001(\010:\005false\"\266\002\n\014ScanResponse\022\030\n\020cells_per",
+"_result\030\001 \003(\r\022\022\n\nscanner_id\030\002 \001(\004\022\024\n\014mor" +
+"e_results\030\003 \001(\010\022\013\n\003ttl\030\004 \001(\r\022!\n\007results\030" +
+"\005 \003(\0132\020.hbase.pb.Result\022\r\n\005stale\030\006 \001(\010\022\037" +
+"\n\027partial_flag_per_result\030\007 \003(\010\022\036\n\026more_" +
+"results_in_region\030\010 \001(\010\022\031\n\021heartbeat_mes" +
+"sage\030\t \001(\010\022+\n\014scan_metrics\030\n \001(\0132\025.hbase" +
+".pb.ScanMetrics\022\032\n\017mvcc_read_point\030\013 \001(\004" +
+":\0010\"\240\002\n\024BulkLoadHFileRequest\022)\n\006region\030\001" +
+" \002(\0132\031.hbase.pb.RegionSpecifier\022>\n\013famil" +
+"y_path\030\002 \003(\0132).hbase.pb.BulkLoadHFileReq",
+"uest.FamilyPath\022\026\n\016assign_seq_num\030\003 \001(\010\022" +
+"+\n\010fs_token\030\004 \001(\0132\031.hbase.pb.DelegationT" +
+"oken\022\022\n\nbulk_token\030\005 \001(\t\022\030\n\tcopy_file\030\006 " +
+"\001(\010:\005false\032*\n\nFamilyPath\022\016\n\006family\030\001 \002(\014" +
+"\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoadHFileResponse\022" +
+"\016\n\006loaded\030\001 \002(\010\"V\n\017DelegationToken\022\022\n\nid" +
+"entifier\030\001 \001(\014\022\020\n\010password\030\002 \001(\014\022\014\n\004kind" +
+"\030\003 \001(\t\022\017\n\007service\030\004 \001(\t\"l\n\026PrepareBulkLo" +
+"adRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb" +
+".TableName\022)\n\006region\030\002 \001(\0132\031.hbase.pb.Re",
+"gionSpecifier\"-\n\027PrepareBulkLoadResponse" +
+"\022\022\n\nbulk_token\030\001 \002(\t\"W\n\026CleanupBulkLoadR" +
+"equest\022\022\n\nbulk_token\030\001 \002(\t\022)\n\006region\030\002 \001" +
+"(\0132\031.hbase.pb.RegionSpecifier\"\031\n\027Cleanup" +
+"BulkLoadResponse\"a\n\026CoprocessorServiceCa" +
+"ll\022\013\n\003row\030\001 \002(\014\022\024\n\014service_name\030\002 \002(\t\022\023\n" +
+"\013method_name\030\003 \002(\t\022\017\n\007request\030\004 \002(\014\"B\n\030C" +
+"oprocessorServiceResult\022&\n\005value\030\001 \001(\0132\027" +
+".hbase.pb.NameBytesPair\"v\n\031CoprocessorSe" +
+"rviceRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.",
+"RegionSpecifier\022.\n\004call\030\002 \002(\0132 .hbase.pb" +
+".CoprocessorServiceCall\"o\n\032CoprocessorSe" +
+"rviceResponse\022)\n\006region\030\001 \002(\0132\031.hbase.pb" +
+".RegionSpecifier\022&\n\005value\030\002 \002(\0132\027.hbase." +
+"pb.NameBytesPair\"\226\001\n\006Action\022\r\n\005index\030\001 \001" +
+"(\r\022)\n\010mutation\030\002 \001(\0132\027.hbase.pb.Mutation" +
+"Proto\022\032\n\003get\030\003 \001(\0132\r.hbase.pb.Get\0226\n\014ser" +
+"vice_call\030\004 \001(\0132 .hbase.pb.CoprocessorSe" +
+"rviceCall\"k\n\014RegionAction\022)\n\006region\030\001 \002(" +
+"\0132\031.hbase.pb.RegionSpecifier\022\016\n\006atomic\030\002",
+" \001(\010\022 \n\006action\030\003 \003(\0132\020.hbase.pb.Action\"c" +
+"\n\017RegionLoadStats\022\027\n\014memstoreLoad\030\001 \001(\005:" +
+"\0010\022\030\n\rheapOccupancy\030\002 \001(\005:\0010\022\035\n\022compacti" +
+"onPressure\030\003 \001(\005:\0010\"j\n\024MultiRegionLoadSt" +
+"ats\022)\n\006region\030\001 \003(\0132\031.hbase.pb.RegionSpe" +
+"cifier\022\'\n\004stat\030\002 \003(\0132\031.hbase.pb.RegionLo" +
+"adStats\"\336\001\n\021ResultOrException\022\r\n\005index\030\001" +
+" \001(\r\022 \n\006result\030\002 \001(\0132\020.hbase.pb.Result\022*" +
+"\n\texception\030\003 \001(\0132\027.hbase.pb.NameBytesPa" +
+"ir\022:\n\016service_result\030\004 \001(\0132\".hbase.pb.Co",
+"processorServiceResult\0220\n\tloadStats\030\005 \001(" +
+"\0132\031.hbase.pb.RegionLoadStatsB\002\030\001\"x\n\022Regi" +
+"onActionResult\0226\n\021resultOrException\030\001 \003(" +
+"\0132\033.hbase.pb.ResultOrException\022*\n\texcept" +
+"ion\030\002 \001(\0132\027.hbase.pb.NameBytesPair\"x\n\014Mu" +
+"ltiRequest\022,\n\014regionAction\030\001 \003(\0132\026.hbase" +
+".pb.RegionAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\t" +
+"condition\030\003 \001(\0132\023.hbase.pb.Condition\"\226\001\n" +
+"\rMultiResponse\0228\n\022regionActionResult\030\001 \003" +
+"(\0132\034.hbase.pb.RegionActionResult\022\021\n\tproc",
+"essed\030\002 \001(\010\0228\n\020regionStatistics\030\003 \001(\0132\036." +
+"hbase.pb.MultiRegionLoadStats*\'\n\013Consist" +
+"ency\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\0012\263\005\n\rClien" +
+"tService\0222\n\003Get\022\024.hbase.pb.GetRequest\032\025." +
".MutateResponse\0225\n\004Scan\022\025.hbase.pb.ScanR" + "hbase.pb.GetResponse\022;\n\006Mutate\022\027.hbase.p" +
"equest\032\026.hbase.pb.ScanResponse\022P\n\rBulkLo" + "b.MutateRequest\032\030.hbase.pb.MutateRespons" +
"adHFile\022\036.hbase.pb.BulkLoadHFileRequest\032" + "e\0225\n\004Scan\022\025.hbase.pb.ScanRequest\032\026.hbase" +
"\037.hbase.pb.BulkLoadHFileResponse\022V\n\017Prep" + ".pb.ScanResponse\022P\n\rBulkLoadHFile\022\036.hbas" +
"areBulkLoad\022 .hbase.pb.PrepareBulkLoadRe" + "e.pb.BulkLoadHFileRequest\032\037.hbase.pb.Bul" +
"quest\032!.hbase.pb.PrepareBulkLoadResponse", "kLoadHFileResponse\022V\n\017PrepareBulkLoad\022 .",
"\022V\n\017CleanupBulkLoad\022 .hbase.pb.CleanupBu" + "hbase.pb.PrepareBulkLoadRequest\032!.hbase." +
"lkLoadRequest\032!.hbase.pb.CleanupBulkLoad" + "pb.PrepareBulkLoadResponse\022V\n\017CleanupBul" +
"Response\022X\n\013ExecService\022#.hbase.pb.Copro" + "kLoad\022 .hbase.pb.CleanupBulkLoadRequest\032" +
"cessorServiceRequest\032$.hbase.pb.Coproces" + "!.hbase.pb.CleanupBulkLoadResponse\022X\n\013Ex" +
"sorServiceResponse\022d\n\027ExecRegionServerSe" + "ecService\022#.hbase.pb.CoprocessorServiceR" +
"rvice\022#.hbase.pb.CoprocessorServiceReque" + "equest\032$.hbase.pb.CoprocessorServiceResp" +
"st\032$.hbase.pb.CoprocessorServiceResponse" + "onse\022d\n\027ExecRegionServerService\022#.hbase." +
"\0228\n\005Multi\022\026.hbase.pb.MultiRequest\032\027.hbas" + "pb.CoprocessorServiceRequest\032$.hbase.pb." +
"e.pb.MultiResponseBI\n1org.apache.hadoop." + "CoprocessorServiceResponse\0228\n\005Multi\022\026.hb" +
"hbase.shaded.protobuf.generatedB\014ClientP", "ase.pb.MultiRequest\032\027.hbase.pb.MultiResp",
"rotosH\001\210\001\001\240\001\001" "onseBI\n1org.apache.hadoop.hbase.shaded.p" +
"rotobuf.generatedB\014ClientProtosH\001\210\001\001\240\001\001"
}; };
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() {
@ -40865,7 +41042,7 @@ public final class ClientProtos {
internal_static_hbase_pb_Scan_fieldAccessorTable = new internal_static_hbase_pb_Scan_fieldAccessorTable = new
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hbase_pb_Scan_descriptor, internal_static_hbase_pb_Scan_descriptor,
new java.lang.String[] { "Column", "Attribute", "StartRow", "StopRow", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "BatchSize", "MaxResultSize", "StoreLimit", "StoreOffset", "LoadColumnFamiliesOnDemand", "Small", "Reversed", "Consistency", "Caching", "AllowPartialResults", "CfTimeRange", "MvccReadPoint", }); new java.lang.String[] { "Column", "Attribute", "StartRow", "StopRow", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "BatchSize", "MaxResultSize", "StoreLimit", "StoreOffset", "LoadColumnFamiliesOnDemand", "Small", "Reversed", "Consistency", "Caching", "AllowPartialResults", "CfTimeRange", "MvccReadPoint", "IncludeStartRow", "IncludeStopRow", });
internal_static_hbase_pb_ScanRequest_descriptor = internal_static_hbase_pb_ScanRequest_descriptor =
getDescriptor().getMessageTypes().get(12); getDescriptor().getMessageTypes().get(12);
internal_static_hbase_pb_ScanRequest_fieldAccessorTable = new internal_static_hbase_pb_ScanRequest_fieldAccessorTable = new

View File

@ -256,6 +256,8 @@ message Scan {
optional bool allow_partial_results = 18; optional bool allow_partial_results = 18;
repeated ColumnFamilyTimeRange cf_time_range = 19; repeated ColumnFamilyTimeRange cf_time_range = 19;
optional uint64 mvcc_read_point = 20 [default = 0]; optional uint64 mvcc_read_point = 20 [default = 0];
optional bool include_start_row = 21 [default = true];
optional bool include_stop_row = 22 [default = false];
} }
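The two new fields give a Scan all four interval shapes: [start, stop), [start, stop], (start, stop) and (start, stop]. A minimal client-side sketch, assuming the withStartRow/withStopRow overloads this commit adds (the row keys below are illustrative only):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    // Scan ["row-a", "row-z"]: inclusive on both ends. On the wire this
    // serializes as include_start_row = true (the default) and
    // include_stop_row = true (overriding its default of false).
    Scan scan = new Scan()
        .withStartRow(Bytes.toBytes("row-a"), true)
        .withStopRow(Bytes.toBytes("row-z"), true);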
/** /**

View File

@ -14229,6 +14229,26 @@ public final class ClientProtos {
* <code>optional uint64 mvcc_read_point = 20 [default = 0];</code> * <code>optional uint64 mvcc_read_point = 20 [default = 0];</code>
*/ */
long getMvccReadPoint(); long getMvccReadPoint();
// optional bool include_start_row = 21 [default = true];
/**
* <code>optional bool include_start_row = 21 [default = true];</code>
*/
boolean hasIncludeStartRow();
/**
* <code>optional bool include_start_row = 21 [default = true];</code>
*/
boolean getIncludeStartRow();
// optional bool include_stop_row = 22 [default = false];
/**
* <code>optional bool include_stop_row = 22 [default = false];</code>
*/
boolean hasIncludeStopRow();
/**
* <code>optional bool include_stop_row = 22 [default = false];</code>
*/
boolean getIncludeStopRow();
} }
/** /**
* Protobuf type {@code hbase.pb.Scan} * Protobuf type {@code hbase.pb.Scan}
@ -14423,6 +14443,16 @@ public final class ClientProtos {
mvccReadPoint_ = input.readUInt64(); mvccReadPoint_ = input.readUInt64();
break; break;
} }
case 168: {
bitField0_ |= 0x00020000;
includeStartRow_ = input.readBool();
break;
}
case 176: {
bitField0_ |= 0x00040000;
includeStopRow_ = input.readBool();
break;
}
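The new case labels follow the protobuf wire format: a tag is (field_number << 3) | wire_type, and bool is varint-encoded (wire type 0). A quick check of the values (plain arithmetic, not generated code):

    int includeStartRowTag = (21 << 3) | 0; // 168, matches "case 168:" above
    int includeStopRowTag  = (22 << 3) | 0; // 176, matches "case 176:" above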
} }
} }
} catch (com.google.protobuf.InvalidProtocolBufferException e) { } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@ -14872,6 +14902,38 @@ public final class ClientProtos {
return mvccReadPoint_; return mvccReadPoint_;
} }
// optional bool include_start_row = 21 [default = true];
public static final int INCLUDE_START_ROW_FIELD_NUMBER = 21;
private boolean includeStartRow_;
/**
* <code>optional bool include_start_row = 21 [default = true];</code>
*/
public boolean hasIncludeStartRow() {
return ((bitField0_ & 0x00020000) == 0x00020000);
}
/**
* <code>optional bool include_start_row = 21 [default = true];</code>
*/
public boolean getIncludeStartRow() {
return includeStartRow_;
}
// optional bool include_stop_row = 22 [default = false];
public static final int INCLUDE_STOP_ROW_FIELD_NUMBER = 22;
private boolean includeStopRow_;
/**
* <code>optional bool include_stop_row = 22 [default = false];</code>
*/
public boolean hasIncludeStopRow() {
return ((bitField0_ & 0x00040000) == 0x00040000);
}
/**
* <code>optional bool include_stop_row = 22 [default = false];</code>
*/
public boolean getIncludeStopRow() {
return includeStopRow_;
}
private void initFields() { private void initFields() {
column_ = java.util.Collections.emptyList(); column_ = java.util.Collections.emptyList();
attribute_ = java.util.Collections.emptyList(); attribute_ = java.util.Collections.emptyList();
@ -14893,6 +14955,8 @@ public final class ClientProtos {
allowPartialResults_ = false; allowPartialResults_ = false;
cfTimeRange_ = java.util.Collections.emptyList(); cfTimeRange_ = java.util.Collections.emptyList();
mvccReadPoint_ = 0L; mvccReadPoint_ = 0L;
includeStartRow_ = true;
includeStopRow_ = false;
} }
private byte memoizedIsInitialized = -1; private byte memoizedIsInitialized = -1;
public final boolean isInitialized() { public final boolean isInitialized() {
@ -14990,6 +15054,12 @@ public final class ClientProtos {
if (((bitField0_ & 0x00010000) == 0x00010000)) { if (((bitField0_ & 0x00010000) == 0x00010000)) {
output.writeUInt64(20, mvccReadPoint_); output.writeUInt64(20, mvccReadPoint_);
} }
if (((bitField0_ & 0x00020000) == 0x00020000)) {
output.writeBool(21, includeStartRow_);
}
if (((bitField0_ & 0x00040000) == 0x00040000)) {
output.writeBool(22, includeStopRow_);
}
getUnknownFields().writeTo(output); getUnknownFields().writeTo(output);
} }
@ -15079,6 +15149,14 @@ public final class ClientProtos {
size += com.google.protobuf.CodedOutputStream size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(20, mvccReadPoint_); .computeUInt64Size(20, mvccReadPoint_);
} }
if (((bitField0_ & 0x00020000) == 0x00020000)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(21, includeStartRow_);
}
if (((bitField0_ & 0x00040000) == 0x00040000)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(22, includeStopRow_);
}
size += getUnknownFields().getSerializedSize(); size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size; memoizedSerializedSize = size;
return size; return size;
@ -15193,6 +15271,16 @@ public final class ClientProtos {
result = result && (getMvccReadPoint() result = result && (getMvccReadPoint()
== other.getMvccReadPoint()); == other.getMvccReadPoint());
} }
result = result && (hasIncludeStartRow() == other.hasIncludeStartRow());
if (hasIncludeStartRow()) {
result = result && (getIncludeStartRow()
== other.getIncludeStartRow());
}
result = result && (hasIncludeStopRow() == other.hasIncludeStopRow());
if (hasIncludeStopRow()) {
result = result && (getIncludeStopRow()
== other.getIncludeStopRow());
}
result = result && result = result &&
getUnknownFields().equals(other.getUnknownFields()); getUnknownFields().equals(other.getUnknownFields());
return result; return result;
@ -15286,6 +15374,14 @@ public final class ClientProtos {
hash = (37 * hash) + MVCC_READ_POINT_FIELD_NUMBER; hash = (37 * hash) + MVCC_READ_POINT_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getMvccReadPoint()); hash = (53 * hash) + hashLong(getMvccReadPoint());
} }
if (hasIncludeStartRow()) {
hash = (37 * hash) + INCLUDE_START_ROW_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getIncludeStartRow());
}
if (hasIncludeStopRow()) {
hash = (37 * hash) + INCLUDE_STOP_ROW_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getIncludeStopRow());
}
hash = (29 * hash) + getUnknownFields().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash; memoizedHashCode = hash;
return hash; return hash;
@ -15471,6 +15567,10 @@ public final class ClientProtos {
} }
mvccReadPoint_ = 0L; mvccReadPoint_ = 0L;
bitField0_ = (bitField0_ & ~0x00080000); bitField0_ = (bitField0_ & ~0x00080000);
includeStartRow_ = true;
bitField0_ = (bitField0_ & ~0x00100000);
includeStopRow_ = false;
bitField0_ = (bitField0_ & ~0x00200000);
return this; return this;
} }
@ -15602,6 +15702,14 @@ public final class ClientProtos {
to_bitField0_ |= 0x00010000; to_bitField0_ |= 0x00010000;
} }
result.mvccReadPoint_ = mvccReadPoint_; result.mvccReadPoint_ = mvccReadPoint_;
if (((from_bitField0_ & 0x00100000) == 0x00100000)) {
to_bitField0_ |= 0x00020000;
}
result.includeStartRow_ = includeStartRow_;
if (((from_bitField0_ & 0x00200000) == 0x00200000)) {
to_bitField0_ |= 0x00040000;
}
result.includeStopRow_ = includeStopRow_;
result.bitField0_ = to_bitField0_; result.bitField0_ = to_bitField0_;
onBuilt(); onBuilt();
return result; return result;
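Note the two different mask sets: the Builder assigns presence bits across all 22 fields in declaration order, so include_start_row and include_stop_row land on bits 20 and 21 (0x00100000, 0x00200000), while the message assigns has-bits only to non-repeated fields; skipping the three repeated ones (column, attribute, cf_time_range) puts the same two fields on bits 17 and 18 (0x00020000, 0x00040000). buildPartial() above performs exactly that remapping.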
@ -15747,6 +15855,12 @@ public final class ClientProtos {
if (other.hasMvccReadPoint()) { if (other.hasMvccReadPoint()) {
setMvccReadPoint(other.getMvccReadPoint()); setMvccReadPoint(other.getMvccReadPoint());
} }
if (other.hasIncludeStartRow()) {
setIncludeStartRow(other.getIncludeStartRow());
}
if (other.hasIncludeStopRow()) {
setIncludeStopRow(other.getIncludeStopRow());
}
this.mergeUnknownFields(other.getUnknownFields()); this.mergeUnknownFields(other.getUnknownFields());
return this; return this;
} }
@ -17272,6 +17386,72 @@ public final class ClientProtos {
return this; return this;
} }
// optional bool include_start_row = 21 [default = true];
private boolean includeStartRow_ = true;
/**
* <code>optional bool include_start_row = 21 [default = true];</code>
*/
public boolean hasIncludeStartRow() {
return ((bitField0_ & 0x00100000) == 0x00100000);
}
/**
* <code>optional bool include_start_row = 21 [default = true];</code>
*/
public boolean getIncludeStartRow() {
return includeStartRow_;
}
/**
* <code>optional bool include_start_row = 21 [default = true];</code>
*/
public Builder setIncludeStartRow(boolean value) {
bitField0_ |= 0x00100000;
includeStartRow_ = value;
onChanged();
return this;
}
/**
* <code>optional bool include_start_row = 21 [default = true];</code>
*/
public Builder clearIncludeStartRow() {
bitField0_ = (bitField0_ & ~0x00100000);
includeStartRow_ = true;
onChanged();
return this;
}
// optional bool include_stop_row = 22 [default = false];
private boolean includeStopRow_ ;
/**
* <code>optional bool include_stop_row = 22 [default = false];</code>
*/
public boolean hasIncludeStopRow() {
return ((bitField0_ & 0x00200000) == 0x00200000);
}
/**
* <code>optional bool include_stop_row = 22 [default = false];</code>
*/
public boolean getIncludeStopRow() {
return includeStopRow_;
}
/**
* <code>optional bool include_stop_row = 22 [default = false];</code>
*/
public Builder setIncludeStopRow(boolean value) {
bitField0_ |= 0x00200000;
includeStopRow_ = value;
onChanged();
return this;
}
/**
* <code>optional bool include_stop_row = 22 [default = false];</code>
*/
public Builder clearIncludeStopRow() {
bitField0_ = (bitField0_ & ~0x00200000);
includeStopRow_ = false;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.Scan) // @@protoc_insertion_point(builder_scope:hbase.pb.Scan)
} }
@ -39732,7 +39912,7 @@ public final class ClientProtos {
"tion\030\003 \001(\0132\023.hbase.pb.Condition\022\023\n\013nonce" + "tion\030\003 \001(\0132\023.hbase.pb.Condition\022\023\n\013nonce" +
"_group\030\004 \001(\004\"E\n\016MutateResponse\022 \n\006result" + "_group\030\004 \001(\004\"E\n\016MutateResponse\022 \n\006result" +
"\030\001 \001(\0132\020.hbase.pb.Result\022\021\n\tprocessed\030\002 " + "\030\001 \001(\0132\020.hbase.pb.Result\022\021\n\tprocessed\030\002 " +
"\001(\010\"\331\004\n\004Scan\022 \n\006column\030\001 \003(\0132\020.hbase.pb." + "\001(\010\"\233\005\n\004Scan\022 \n\006column\030\001 \003(\0132\020.hbase.pb." +
"Column\022*\n\tattribute\030\002 \003(\0132\027.hbase.pb.Nam" + "Column\022*\n\tattribute\030\002 \003(\0132\027.hbase.pb.Nam" +
"eBytesPair\022\021\n\tstart_row\030\003 \001(\014\022\020\n\010stop_ro" + "eBytesPair\022\021\n\tstart_row\030\003 \001(\014\022\020\n\010stop_ro" +
"w\030\004 \001(\014\022 \n\006filter\030\005 \001(\0132\020.hbase.pb.Filte" + "w\030\004 \001(\014\022 \n\006filter\030\005 \001(\0132\020.hbase.pb.Filte" +
@ -39747,97 +39927,98 @@ public final class ClientProtos {
"aching\030\021 \001(\r\022\035\n\025allow_partial_results\030\022 " + "aching\030\021 \001(\r\022\035\n\025allow_partial_results\030\022 " +
"\001(\010\0226\n\rcf_time_range\030\023 \003(\0132\037.hbase.pb.Co" + "\001(\010\0226\n\rcf_time_range\030\023 \003(\0132\037.hbase.pb.Co" +
"lumnFamilyTimeRange\022\032\n\017mvcc_read_point\030\024", "lumnFamilyTimeRange\022\032\n\017mvcc_read_point\030\024",
" \001(\004:\0010\"\246\002\n\013ScanRequest\022)\n\006region\030\001 \001(\0132" + " \001(\004:\0010\022\037\n\021include_start_row\030\025 \001(\010:\004true" +
"\031.hbase.pb.RegionSpecifier\022\034\n\004scan\030\002 \001(\013" + "\022\037\n\020include_stop_row\030\026 \001(\010:\005false\"\246\002\n\013Sc" +
"2\016.hbase.pb.Scan\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016" + "anRequest\022)\n\006region\030\001 \001(\0132\031.hbase.pb.Reg" +
"number_of_rows\030\004 \001(\r\022\025\n\rclose_scanner\030\005 " + "ionSpecifier\022\034\n\004scan\030\002 \001(\0132\016.hbase.pb.Sc" +
"\001(\010\022\025\n\rnext_call_seq\030\006 \001(\004\022\037\n\027client_han" + "an\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016number_of_rows" +
"dles_partials\030\007 \001(\010\022!\n\031client_handles_he" + "\030\004 \001(\r\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n\rnext_ca" +
"artbeats\030\010 \001(\010\022\032\n\022track_scan_metrics\030\t \001" + "ll_seq\030\006 \001(\004\022\037\n\027client_handles_partials\030" +
"(\010\022\024\n\005renew\030\n \001(\010:\005false\"\266\002\n\014ScanRespons" + "\007 \001(\010\022!\n\031client_handles_heartbeats\030\010 \001(\010" +
"e\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n\nscanner_i" + "\022\032\n\022track_scan_metrics\030\t \001(\010\022\024\n\005renew\030\n " +
"d\030\002 \001(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003ttl\030\004 \001", "\001(\010:\005false\"\266\002\n\014ScanResponse\022\030\n\020cells_per",
"(\r\022!\n\007results\030\005 \003(\0132\020.hbase.pb.Result\022\r\n" + "_result\030\001 \003(\r\022\022\n\nscanner_id\030\002 \001(\004\022\024\n\014mor" +
"\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_result\030" + "e_results\030\003 \001(\010\022\013\n\003ttl\030\004 \001(\r\022!\n\007results\030" +
"\007 \003(\010\022\036\n\026more_results_in_region\030\010 \001(\010\022\031\n" + "\005 \003(\0132\020.hbase.pb.Result\022\r\n\005stale\030\006 \001(\010\022\037" +
"\021heartbeat_message\030\t \001(\010\022+\n\014scan_metrics" + "\n\027partial_flag_per_result\030\007 \003(\010\022\036\n\026more_" +
"\030\n \001(\0132\025.hbase.pb.ScanMetrics\022\032\n\017mvcc_re" + "results_in_region\030\010 \001(\010\022\031\n\021heartbeat_mes" +
"ad_point\030\013 \001(\004:\0010\"\240\002\n\024BulkLoadHFileReque" + "sage\030\t \001(\010\022+\n\014scan_metrics\030\n \001(\0132\025.hbase" +
"st\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpec" + ".pb.ScanMetrics\022\032\n\017mvcc_read_point\030\013 \001(\004" +
"ifier\022>\n\013family_path\030\002 \003(\0132).hbase.pb.Bu" + ":\0010\"\240\002\n\024BulkLoadHFileRequest\022)\n\006region\030\001" +
"lkLoadHFileRequest.FamilyPath\022\026\n\016assign_" + " \002(\0132\031.hbase.pb.RegionSpecifier\022>\n\013famil" +
"seq_num\030\003 \001(\010\022+\n\010fs_token\030\004 \001(\0132\031.hbase.", "y_path\030\002 \003(\0132).hbase.pb.BulkLoadHFileReq",
"pb.DelegationToken\022\022\n\nbulk_token\030\005 \001(\t\022\030" + "uest.FamilyPath\022\026\n\016assign_seq_num\030\003 \001(\010\022" +
"\n\tcopy_file\030\006 \001(\010:\005false\032*\n\nFamilyPath\022\016" + "+\n\010fs_token\030\004 \001(\0132\031.hbase.pb.DelegationT" +
"\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoad" + "oken\022\022\n\nbulk_token\030\005 \001(\t\022\030\n\tcopy_file\030\006 " +
"HFileResponse\022\016\n\006loaded\030\001 \002(\010\"V\n\017Delegat" + "\001(\010:\005false\032*\n\nFamilyPath\022\016\n\006family\030\001 \002(\014" +
"ionToken\022\022\n\nidentifier\030\001 \001(\014\022\020\n\010password" + "\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoadHFileResponse\022" +
"\030\002 \001(\014\022\014\n\004kind\030\003 \001(\t\022\017\n\007service\030\004 \001(\t\"l\n" + "\016\n\006loaded\030\001 \002(\010\"V\n\017DelegationToken\022\022\n\nid" +
"\026PrepareBulkLoadRequest\022\'\n\ntable_name\030\001 " + "entifier\030\001 \001(\014\022\020\n\010password\030\002 \001(\014\022\014\n\004kind" +
"\002(\0132\023.hbase.pb.TableName\022)\n\006region\030\002 \001(\013" + "\030\003 \001(\t\022\017\n\007service\030\004 \001(\t\"l\n\026PrepareBulkLo" +
"2\031.hbase.pb.RegionSpecifier\"-\n\027PrepareBu" + "adRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb" +
"lkLoadResponse\022\022\n\nbulk_token\030\001 \002(\t\"W\n\026Cl", ".TableName\022)\n\006region\030\002 \001(\0132\031.hbase.pb.Re",
"eanupBulkLoadRequest\022\022\n\nbulk_token\030\001 \002(\t" + "gionSpecifier\"-\n\027PrepareBulkLoadResponse" +
"\022)\n\006region\030\002 \001(\0132\031.hbase.pb.RegionSpecif" + "\022\022\n\nbulk_token\030\001 \002(\t\"W\n\026CleanupBulkLoadR" +
"ier\"\031\n\027CleanupBulkLoadResponse\"a\n\026Coproc" + "equest\022\022\n\nbulk_token\030\001 \002(\t\022)\n\006region\030\002 \001" +
"essorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014service" + "(\0132\031.hbase.pb.RegionSpecifier\"\031\n\027Cleanup" +
"_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022\017\n\007requ" + "BulkLoadResponse\"a\n\026CoprocessorServiceCa" +
"est\030\004 \002(\014\"B\n\030CoprocessorServiceResult\022&\n" + "ll\022\013\n\003row\030\001 \002(\014\022\024\n\014service_name\030\002 \002(\t\022\023\n" +
"\005value\030\001 \001(\0132\027.hbase.pb.NameBytesPair\"v\n" + "\013method_name\030\003 \002(\t\022\017\n\007request\030\004 \002(\014\"B\n\030C" +
"\031CoprocessorServiceRequest\022)\n\006region\030\001 \002" + "oprocessorServiceResult\022&\n\005value\030\001 \001(\0132\027" +
"(\0132\031.hbase.pb.RegionSpecifier\022.\n\004call\030\002 " + ".hbase.pb.NameBytesPair\"v\n\031CoprocessorSe" +
"\002(\0132 .hbase.pb.CoprocessorServiceCall\"o\n", "rviceRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.",
"\032CoprocessorServiceResponse\022)\n\006region\030\001 " + "RegionSpecifier\022.\n\004call\030\002 \002(\0132 .hbase.pb" +
"\002(\0132\031.hbase.pb.RegionSpecifier\022&\n\005value\030" + ".CoprocessorServiceCall\"o\n\032CoprocessorSe" +
"\002 \002(\0132\027.hbase.pb.NameBytesPair\"\226\001\n\006Actio" + "rviceResponse\022)\n\006region\030\001 \002(\0132\031.hbase.pb" +
"n\022\r\n\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001(\0132\027.hba" + ".RegionSpecifier\022&\n\005value\030\002 \002(\0132\027.hbase." +
"se.pb.MutationProto\022\032\n\003get\030\003 \001(\0132\r.hbase" + "pb.NameBytesPair\"\226\001\n\006Action\022\r\n\005index\030\001 \001" +
".pb.Get\0226\n\014service_call\030\004 \001(\0132 .hbase.pb" + "(\r\022)\n\010mutation\030\002 \001(\0132\027.hbase.pb.Mutation" +
".CoprocessorServiceCall\"k\n\014RegionAction\022" + "Proto\022\032\n\003get\030\003 \001(\0132\r.hbase.pb.Get\0226\n\014ser" +
")\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifi" + "vice_call\030\004 \001(\0132 .hbase.pb.CoprocessorSe" +
"er\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(\0132\020.hba" + "rviceCall\"k\n\014RegionAction\022)\n\006region\030\001 \002(" +
"se.pb.Action\"c\n\017RegionLoadStats\022\027\n\014memst", "\0132\031.hbase.pb.RegionSpecifier\022\016\n\006atomic\030\002",
"oreLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy\030\002 \001(\005:" + " \001(\010\022 \n\006action\030\003 \003(\0132\020.hbase.pb.Action\"c" +
"\0010\022\035\n\022compactionPressure\030\003 \001(\005:\0010\"j\n\024Mul" + "\n\017RegionLoadStats\022\027\n\014memstoreLoad\030\001 \001(\005:" +
"tiRegionLoadStats\022)\n\006region\030\001 \003(\0132\031.hbas" + "\0010\022\030\n\rheapOccupancy\030\002 \001(\005:\0010\022\035\n\022compacti" +
"e.pb.RegionSpecifier\022\'\n\004stat\030\002 \003(\0132\031.hba" + "onPressure\030\003 \001(\005:\0010\"j\n\024MultiRegionLoadSt" +
"se.pb.RegionLoadStats\"\336\001\n\021ResultOrExcept" + "ats\022)\n\006region\030\001 \003(\0132\031.hbase.pb.RegionSpe" +
"ion\022\r\n\005index\030\001 \001(\r\022 \n\006result\030\002 \001(\0132\020.hba" + "cifier\022\'\n\004stat\030\002 \003(\0132\031.hbase.pb.RegionLo" +
"se.pb.Result\022*\n\texception\030\003 \001(\0132\027.hbase." + "adStats\"\336\001\n\021ResultOrException\022\r\n\005index\030\001" +
"pb.NameBytesPair\022:\n\016service_result\030\004 \001(\013" + " \001(\r\022 \n\006result\030\002 \001(\0132\020.hbase.pb.Result\022*" +
"2\".hbase.pb.CoprocessorServiceResult\0220\n\t" + "\n\texception\030\003 \001(\0132\027.hbase.pb.NameBytesPa" +
"loadStats\030\005 \001(\0132\031.hbase.pb.RegionLoadSta", "ir\022:\n\016service_result\030\004 \001(\0132\".hbase.pb.Co",
"tsB\002\030\001\"x\n\022RegionActionResult\0226\n\021resultOr" + "processorServiceResult\0220\n\tloadStats\030\005 \001(" +
"Exception\030\001 \003(\0132\033.hbase.pb.ResultOrExcep" + "\0132\031.hbase.pb.RegionLoadStatsB\002\030\001\"x\n\022Regi" +
"tion\022*\n\texception\030\002 \001(\0132\027.hbase.pb.NameB" + "onActionResult\0226\n\021resultOrException\030\001 \003(" +
"ytesPair\"x\n\014MultiRequest\022,\n\014regionAction" + "\0132\033.hbase.pb.ResultOrException\022*\n\texcept" +
"\030\001 \003(\0132\026.hbase.pb.RegionAction\022\022\n\nnonceG" + "ion\030\002 \001(\0132\027.hbase.pb.NameBytesPair\"x\n\014Mu" +
"roup\030\002 \001(\004\022&\n\tcondition\030\003 \001(\0132\023.hbase.pb" + "ltiRequest\022,\n\014regionAction\030\001 \003(\0132\026.hbase" +
".Condition\"\226\001\n\rMultiResponse\0228\n\022regionAc" + ".pb.RegionAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\t" +
"tionResult\030\001 \003(\0132\034.hbase.pb.RegionAction" + "condition\030\003 \001(\0132\023.hbase.pb.Condition\"\226\001\n" +
"Result\022\021\n\tprocessed\030\002 \001(\010\0228\n\020regionStati" + "\rMultiResponse\0228\n\022regionActionResult\030\001 \003" +
"stics\030\003 \001(\0132\036.hbase.pb.MultiRegionLoadSt", "(\0132\034.hbase.pb.RegionActionResult\022\021\n\tproc",
"ats*\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010TIMELI" + "essed\030\002 \001(\010\0228\n\020regionStatistics\030\003 \001(\0132\036." +
"NE\020\0012\263\005\n\rClientService\0222\n\003Get\022\024.hbase.pb" + "hbase.pb.MultiRegionLoadStats*\'\n\013Consist" +
".GetRequest\032\025.hbase.pb.GetResponse\022;\n\006Mu" + "ency\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\0012\263\005\n\rClien" +
"tate\022\027.hbase.pb.MutateRequest\032\030.hbase.pb" + "tService\0222\n\003Get\022\024.hbase.pb.GetRequest\032\025." +
".MutateResponse\0225\n\004Scan\022\025.hbase.pb.ScanR" + "hbase.pb.GetResponse\022;\n\006Mutate\022\027.hbase.p" +
"equest\032\026.hbase.pb.ScanResponse\022P\n\rBulkLo" + "b.MutateRequest\032\030.hbase.pb.MutateRespons" +
"adHFile\022\036.hbase.pb.BulkLoadHFileRequest\032" + "e\0225\n\004Scan\022\025.hbase.pb.ScanRequest\032\026.hbase" +
"\037.hbase.pb.BulkLoadHFileResponse\022V\n\017Prep" + ".pb.ScanResponse\022P\n\rBulkLoadHFile\022\036.hbas" +
"areBulkLoad\022 .hbase.pb.PrepareBulkLoadRe" + "e.pb.BulkLoadHFileRequest\032\037.hbase.pb.Bul" +
"quest\032!.hbase.pb.PrepareBulkLoadResponse", "kLoadHFileResponse\022V\n\017PrepareBulkLoad\022 .",
"\022V\n\017CleanupBulkLoad\022 .hbase.pb.CleanupBu" + "hbase.pb.PrepareBulkLoadRequest\032!.hbase." +
"lkLoadRequest\032!.hbase.pb.CleanupBulkLoad" + "pb.PrepareBulkLoadResponse\022V\n\017CleanupBul" +
"Response\022X\n\013ExecService\022#.hbase.pb.Copro" + "kLoad\022 .hbase.pb.CleanupBulkLoadRequest\032" +
"cessorServiceRequest\032$.hbase.pb.Coproces" + "!.hbase.pb.CleanupBulkLoadResponse\022X\n\013Ex" +
"sorServiceResponse\022d\n\027ExecRegionServerSe" + "ecService\022#.hbase.pb.CoprocessorServiceR" +
"rvice\022#.hbase.pb.CoprocessorServiceReque" + "equest\032$.hbase.pb.CoprocessorServiceResp" +
"st\032$.hbase.pb.CoprocessorServiceResponse" + "onse\022d\n\027ExecRegionServerService\022#.hbase." +
"\0228\n\005Multi\022\026.hbase.pb.MultiRequest\032\027.hbas" + "pb.CoprocessorServiceRequest\032$.hbase.pb." +
"e.pb.MultiResponseBB\n*org.apache.hadoop." + "CoprocessorServiceResponse\0228\n\005Multi\022\026.hb" +
"hbase.protobuf.generatedB\014ClientProtosH\001", "ase.pb.MultiRequest\032\027.hbase.pb.MultiResp",
"\210\001\001\240\001\001" "onseBB\n*org.apache.hadoop.hbase.protobuf" +
".generatedB\014ClientProtosH\001\210\001\001\240\001\001"
}; };
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@ -39927,7 +40108,7 @@ public final class ClientProtos {
internal_static_hbase_pb_Scan_fieldAccessorTable = new internal_static_hbase_pb_Scan_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable( com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_Scan_descriptor, internal_static_hbase_pb_Scan_descriptor,
new java.lang.String[] { "Column", "Attribute", "StartRow", "StopRow", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "BatchSize", "MaxResultSize", "StoreLimit", "StoreOffset", "LoadColumnFamiliesOnDemand", "Small", "Reversed", "Consistency", "Caching", "AllowPartialResults", "CfTimeRange", "MvccReadPoint", }); new java.lang.String[] { "Column", "Attribute", "StartRow", "StopRow", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "BatchSize", "MaxResultSize", "StoreLimit", "StoreOffset", "LoadColumnFamiliesOnDemand", "Small", "Reversed", "Consistency", "Caching", "AllowPartialResults", "CfTimeRange", "MvccReadPoint", "IncludeStartRow", "IncludeStopRow", });
internal_static_hbase_pb_ScanRequest_descriptor = internal_static_hbase_pb_ScanRequest_descriptor =
getDescriptor().getMessageTypes().get(12); getDescriptor().getMessageTypes().get(12);
internal_static_hbase_pb_ScanRequest_fieldAccessorTable = new internal_static_hbase_pb_ScanRequest_fieldAccessorTable = new

View File

@ -256,6 +256,8 @@ message Scan {
optional bool allow_partial_results = 18; optional bool allow_partial_results = 18;
repeated ColumnFamilyTimeRange cf_time_range = 19; repeated ColumnFamilyTimeRange cf_time_range = 19;
optional uint64 mvcc_read_point = 20 [default = 0]; optional uint64 mvcc_read_point = 20 [default = 0];
optional bool include_start_row = 21 [default = true];
optional bool include_stop_row = 22 [default = false];
} }
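This second Client.proto hunk is the non-shaded copy (compiled into org.apache.hadoop.hbase.protobuf.generated, per the descriptor string above); the patch keeps it field-for-field in sync with the shaded definition.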
/** /**

View File

@ -179,8 +179,8 @@ class DefaultStoreFileManager implements StoreFileManager {
} }
@Override @Override
public final Collection<StoreFile> getFilesForScanOrGet(boolean isGet, public final Collection<StoreFile> getFilesForScan(byte[] startRow, boolean includeStartRow,
byte[] startRow, byte[] stopRow) { byte[] stopRow, boolean includeStopRow) {
// We cannot provide any useful input and already have the files sorted by seqNum. // We cannot provide any useful input and already have the files sorted by seqNum.
return getStorefiles(); return getStorefiles();
} }
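Unlike the stripe-based manager further down, the default manager keeps no row-range index over its files, so the new boundary flags are accepted and ignored: every store file remains a candidate regardless of the scan range.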

View File

@ -5760,8 +5760,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
protected Cell joinedContinuationRow = null; protected Cell joinedContinuationRow = null;
private boolean filterClosed = false; private boolean filterClosed = false;
protected final int isScan;
protected final byte[] stopRow; protected final byte[] stopRow;
protected final boolean includeStopRow;
protected final HRegion region; protected final HRegion region;
protected final CellComparator comparator; protected final CellComparator comparator;
@ -5797,15 +5797,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
*/ */
defaultScannerContext = ScannerContext.newBuilder() defaultScannerContext = ScannerContext.newBuilder()
.setBatchLimit(scan.getBatch()).build(); .setBatchLimit(scan.getBatch()).build();
if (Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW) && !scan.isGetScan()) {
this.stopRow = null;
} else {
this.stopRow = scan.getStopRow(); this.stopRow = scan.getStopRow();
} this.includeStopRow = scan.includeStopRow();
// If we are doing a get, we want to be [startRow,endRow]. Normally
// it is [startRow,endRow) and if startRow=endRow we get nothing.
this.isScan = scan.isGetScan() ? 1 : 0;
// synchronize on scannerReadPoints so that nobody calculates // synchronize on scannerReadPoints so that nobody calculates
// getSmallestReadPoint, before scannerReadPoints is updated. // getSmallestReadPoint, before scannerReadPoints is updated.
@ -6118,7 +6111,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
// Let's see what we have in the storeHeap. // Let's see what we have in the storeHeap.
Cell current = this.storeHeap.peek(); Cell current = this.storeHeap.peek();
boolean stopRow = isStopRow(current); boolean shouldStop = shouldStop(current);
// When hasFilterRow is true it means that all the cells for a particular row must be // When hasFilterRow is true it means that all the cells for a particular row must be
// read before a filtering decision can be made. This means that filters where hasFilterRow // read before a filtering decision can be made. This means that filters where hasFilterRow
// run the risk of encountering out of memory errors in the case that they are applied to a // run the risk of encountering out of memory errors in the case that they are applied to a
@ -6142,7 +6135,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
// If not, then it's main path - getting results from storeHeap. // If not, then it's main path - getting results from storeHeap.
if (joinedContinuationRow == null) { if (joinedContinuationRow == null) {
// First, check if we are at a stop row. If so, there are no more results. // First, check if we are at a stop row. If so, there are no more results.
if (stopRow) { if (shouldStop) {
if (hasFilterRow) { if (hasFilterRow) {
filter.filterRowCells(results); filter.filterRowCells(results);
} }
@ -6182,7 +6175,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
} }
Cell nextKv = this.storeHeap.peek(); Cell nextKv = this.storeHeap.peek();
stopRow = nextKv == null || isStopRow(nextKv); shouldStop = shouldStop(nextKv);
// save that the row was empty before filters applied to it. // save that the row was empty before filters applied to it.
final boolean isEmptyRow = results.isEmpty(); final boolean isEmptyRow = results.isEmpty();
@ -6219,7 +6212,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
// This row was totally filtered out; if this is NOT the last row, // This row was totally filtered out; if this is NOT the last row,
// we should continue on. Otherwise, nothing else to do. // we should continue on. Otherwise, nothing else to do.
if (!stopRow) continue; if (!shouldStop) continue;
return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
} }
@ -6260,10 +6253,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
if (!moreRows) { if (!moreRows) {
return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
} }
if (!stopRow) continue; if (!shouldStop) continue;
} }
if (stopRow) { if (shouldStop) {
return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
} else { } else {
return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues();
@ -6343,10 +6336,15 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
.postScannerFilterRow(this, curRowCell); .postScannerFilterRow(this, curRowCell);
} }
protected boolean isStopRow(Cell currentRowCell) { protected boolean shouldStop(Cell currentRowCell) {
return currentRowCell == null if (currentRowCell == null) {
|| (stopRow != null && comparator.compareRows(currentRowCell, stopRow, 0, stopRow return true;
.length) >= isScan); }
if (stopRow == null || Bytes.equals(stopRow, HConstants.EMPTY_END_ROW)) {
return false;
}
int c = comparator.compareRows(currentRowCell, stopRow, 0, stopRow.length);
return c > 0 || (c == 0 && !includeStopRow);
} }
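The rewrite replaces the old isScan offset trick with an explicit bound check. A standalone sketch of the forward-scan semantics this encodes (illustrative, not part of the patch):

    // cmpToStopRow = comparator.compareRows(currentRowCell, stopRow, ...):
    //   < 0 -> before the bound, keep scanning
    //  == 0 -> on the bound: stop only when the stop row is exclusive
    //   > 0 -> past the bound: always stop
    static boolean shouldStopForward(int cmpToStopRow, boolean includeStopRow) {
      return cmpToStopRow > 0 || (cmpToStopRow == 0 && !includeStopRow);
    }

With stopRow = "r2", includeStopRow = false yields the traditional [start, "r2") scan, while true yields [start, "r2"].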
@Override @Override

View File

@ -18,6 +18,13 @@
*/ */
package org.apache.hadoop.hbase.regionserver; package org.apache.hadoop.hbase.regionserver;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableCollection;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import java.io.IOException; import java.io.IOException;
import java.io.InterruptedIOException; import java.io.InterruptedIOException;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
@ -90,13 +97,6 @@ import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableCollection;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
/** /**
* A Store holds a column family in a Region. Its a memstore and a set of zero * A Store holds a column family in a Region. Its a memstore and a set of zero
* or more StoreFiles, which stretch backwards over time. * or more StoreFiles, which stretch backwards over time.
@ -1138,20 +1138,19 @@ public class HStore implements Store {
} }
/** /**
* Get all scanners with no filtering based on TTL (that happens further down * Get all scanners with no filtering based on TTL (that happens further down the line).
* the line).
* @return all scanners for this store * @return all scanners for this store
*/ */
@Override @Override
public List<KeyValueScanner> getScanners(boolean cacheBlocks, boolean isGet, public List<KeyValueScanner> getScanners(boolean cacheBlocks, boolean usePread,
boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, boolean includeStartRow,
byte[] stopRow, long readPt) throws IOException { byte[] stopRow, boolean includeStopRow, long readPt) throws IOException {
Collection<StoreFile> storeFilesToScan; Collection<StoreFile> storeFilesToScan;
List<KeyValueScanner> memStoreScanners; List<KeyValueScanner> memStoreScanners;
this.lock.readLock().lock(); this.lock.readLock().lock();
try { try {
storeFilesToScan = storeFilesToScan = this.storeEngine.getStoreFileManager().getFilesForScan(startRow,
this.storeEngine.getStoreFileManager().getFilesForScanOrGet(isGet, startRow, stopRow); includeStartRow, stopRow, includeStopRow);
memStoreScanners = this.memstore.getScanners(readPt); memStoreScanners = this.memstore.getScanners(readPt);
} finally { } finally {
this.lock.readLock().unlock(); this.lock.readLock().unlock();
@ -1164,8 +1163,7 @@ public class HStore implements Store {
// actually more correct, since memstore get put at the end. // actually more correct, since memstore get put at the end.
List<StoreFileScanner> sfScanners = StoreFileScanner.getScannersForStoreFiles(storeFilesToScan, List<StoreFileScanner> sfScanners = StoreFileScanner.getScannersForStoreFiles(storeFilesToScan,
cacheBlocks, usePread, isCompaction, false, matcher, readPt, isPrimaryReplicaStore()); cacheBlocks, usePread, isCompaction, false, matcher, readPt, isPrimaryReplicaStore());
List<KeyValueScanner> scanners = List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>(sfScanners.size() + 1);
new ArrayList<KeyValueScanner>(sfScanners.size()+1);
scanners.addAll(sfScanners); scanners.addAll(sfScanners);
// Then the memstore scanners // Then the memstore scanners
scanners.addAll(memStoreScanners); scanners.addAll(memStoreScanners);
@ -1174,8 +1172,9 @@ public class HStore implements Store {
@Override @Override
public List<KeyValueScanner> getScanners(List<StoreFile> files, boolean cacheBlocks, public List<KeyValueScanner> getScanners(List<StoreFile> files, boolean cacheBlocks,
boolean isGet, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow,
byte[] startRow, byte[] stopRow, long readPt, boolean includeMemstoreScanner) throws IOException { boolean includeStartRow, byte[] stopRow, boolean includeStopRow, long readPt,
boolean includeMemstoreScanner) throws IOException {
List<KeyValueScanner> memStoreScanners = null; List<KeyValueScanner> memStoreScanners = null;
if (includeMemstoreScanner) { if (includeMemstoreScanner) {
this.lock.readLock().lock(); this.lock.readLock().lock();
@ -2234,8 +2233,8 @@ public class HStore implements Store {
if (LOG.isInfoEnabled()) { if (LOG.isInfoEnabled()) {
LOG.info("Region: " + HStore.this.getRegionInfo().getEncodedName() + LOG.info("Region: " + HStore.this.getRegionInfo().getEncodedName() +
" added " + storeFile + ", entries=" + storeFile.getReader().getEntries() + " added " + storeFile + ", entries=" + storeFile.getReader().getEntries() +
", sequenceid=" + + storeFile.getReader().getSequenceID() + ", sequenceid=" + +storeFile.getReader().getSequenceID() + ", filesize="
", filesize=" + StringUtils.humanReadableInt(storeFile.getReader().length())); + TraditionalBinaryPrefix.long2String(storeFile.getReader().length(), "", 1));
} }
} }

View File

@ -23,9 +23,11 @@ import java.util.List;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
import org.apache.hadoop.hbase.util.Bytes;
/** /**
* ReversibleRegionScannerImpl extends from RegionScannerImpl, and is used to * ReversibleRegionScannerImpl extends from RegionScannerImpl, and is used to
@ -56,10 +58,15 @@ class ReversedRegionScannerImpl extends RegionScannerImpl {
} }
@Override @Override
protected boolean isStopRow(Cell currentRowCell) { protected boolean shouldStop(Cell currentRowCell) {
return currentRowCell == null if (currentRowCell == null) {
|| (super.stopRow != null && comparator.compareRows(currentRowCell, stopRow, 0, return true;
stopRow.length) <= super.isScan); }
if (stopRow == null || Bytes.equals(stopRow, HConstants.EMPTY_START_ROW)) {
return false;
}
int c = comparator.compareRows(currentRowCell, stopRow, 0, stopRow.length);
return c < 0 || (c == 0 && !includeStopRow);
} }
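The reversed scanner mirrors the forward predicate: rows arrive in decreasing order, so the scan stops once the current row sorts below the stop row (c < 0), and the empty-bound sentinel to test is EMPTY_START_ROW rather than EMPTY_END_ROW.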
@Override @Override

View File

@ -81,35 +81,45 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
throws IOException; throws IOException;
/** /**
* Get all scanners with no filtering based on TTL (that happens further down * Get all scanners with no filtering based on TTL (that happens further down the line).
* the line). * @param cacheBlocks cache the blocks or not
* @param cacheBlocks * @param usePread true to use pread, false if not
* @param isGet * @param isCompaction true if the scanner is created for compaction
* @param usePread * @param matcher the scan query matcher
* @param isCompaction * @param startRow the start row
* @param matcher * @param stopRow the stop row
* @param startRow * @param readPt the read point of the current scan
* @param stopRow
* @param readPt
* @return all scanners for this store * @return all scanners for this store
*/ */
List<KeyValueScanner> getScanners( default List<KeyValueScanner> getScanners(boolean cacheBlocks, boolean isGet, boolean usePread,
boolean cacheBlocks, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, byte[] stopRow, long readPt)
boolean isGet, throws IOException {
boolean usePread, return getScanners(cacheBlocks, usePread, isCompaction, matcher, startRow, true, stopRow, false,
boolean isCompaction, readPt);
ScanQueryMatcher matcher, }
byte[] startRow,
byte[] stopRow, /**
long readPt * Get all scanners with no filtering based on TTL (that happens further down the line).
) throws IOException; * @param cacheBlocks cache the blocks or not
* @param usePread true to use pread, false if not
* @param isCompaction true if the scanner is created for compaction
* @param matcher the scan query matcher
* @param startRow the start row
* @param includeStartRow true to include start row, false if not
* @param stopRow the stop row
* @param includeStopRow true to include stop row, false if not
* @param readPt the read point of the current scan
* @return all scanners for this store
*/
List<KeyValueScanner> getScanners(boolean cacheBlocks, boolean usePread, boolean isCompaction,
ScanQueryMatcher matcher, byte[] startRow, boolean includeStartRow, byte[] stopRow,
boolean includeStopRow, long readPt) throws IOException;
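The default method preserves the pre-patch contract for existing callers: includeStartRow = true and includeStopRow = false, i.e. the traditional half-open [startRow, stopRow) range. Callers that need any other interval shape use the nine-argument overload directly.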
/** /**
* Create scanners on the given files and if needed on the memstore with no filtering based on TTL * Create scanners on the given files and if needed on the memstore with no filtering based on TTL
* (that happens further down the line). * (that happens further down the line).
* @param files the list of files on which the scanners have to be created * @param files the list of files on which the scanners have to be created
* @param cacheBlocks cache the blocks or not * @param cacheBlocks cache the blocks or not
* @param isGet true if it is get, false if not
* @param usePread true to use pread, false if not * @param usePread true to use pread, false if not
* @param isCompaction true if the scanner is created for compaction * @param isCompaction true if the scanner is created for compaction
* @param matcher the scan query matcher * @param matcher the scan query matcher
@ -119,9 +129,34 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
* @param includeMemstoreScanner true if memstore has to be included * @param includeMemstoreScanner true if memstore has to be included
* @return scanners on the given files and on the memstore if specified * @return scanners on the given files and on the memstore if specified
*/ */
List<KeyValueScanner> getScanners(List<StoreFile> files, boolean cacheBlocks, boolean isGet, default List<KeyValueScanner> getScanners(List<StoreFile> files, boolean cacheBlocks,
boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, boolean isGet, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher,
byte[] stopRow, long readPt, boolean includeMemstoreScanner) throws IOException; byte[] startRow, byte[] stopRow, long readPt, boolean includeMemstoreScanner)
throws IOException {
return getScanners(files, cacheBlocks, usePread, isCompaction, matcher, startRow, true, stopRow,
false, readPt, includeMemstoreScanner);
}
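Note that isGet survives in this default overload's parameter list but is no longer consulted; the delegation always passes the half-open defaults, so the flag is presumably retained only to keep existing call sites compiling.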
/**
* Create scanners on the given files and if needed on the memstore with no filtering based on TTL
* (that happens further down the line).
* @param files the list of files on which the scanners have to be created
* @param cacheBlocks cache the blocks or not
* @param usePread true to use pread, false if not
* @param isCompaction true if the scanner is created for compaction
* @param matcher the scan query matcher
* @param startRow the start row
* @param includeStartRow true to include start row, false if not
* @param stopRow the stop row
* @param includeStopRow true to include stop row, false if not
* @param readPt the read point of the current scan
* @param includeMemstoreScanner true if memstore has to be included
* @return scanners on the given files and on the memstore if specified
*/
List<KeyValueScanner> getScanners(List<StoreFile> files, boolean cacheBlocks, boolean usePread,
boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, boolean includeStartRow,
byte[] stopRow, boolean includeStopRow, long readPt, boolean includeMemstoreScanner)
throws IOException;
ScanInfo getScanInfo(); ScanInfo getScanInfo();

View File

@ -105,14 +105,12 @@ public interface StoreFileManager {
/** /**
* Gets the store files to scan for a Scan or Get request. * Gets the store files to scan for a Scan or Get request.
* @param isGet Whether it's a get.
* @param startRow Start row of the request. * @param startRow Start row of the request.
* @param stopRow Stop row of the request. * @param stopRow Stop row of the request.
* @return The list of files that are to be read for this request. * @return The list of files that are to be read for this request.
*/ */
Collection<StoreFile> getFilesForScanOrGet( Collection<StoreFile> getFilesForScan(byte[] startRow, boolean includeStartRow, byte[] stopRow,
boolean isGet, byte[] startRow, byte[] stopRow boolean includeStopRow);
);
/** /**
* Gets initial, full list of candidate store files to check for row-key-before. * Gets initial, full list of candidate store files to check for row-key-before.

View File

@ -289,8 +289,8 @@ public class StripeStoreFileManager
} }
@Override @Override
public Collection<StoreFile> getFilesForScanOrGet( public Collection<StoreFile> getFilesForScan(byte[] startRow, boolean includeStartRow,
boolean isGet, byte[] startRow, byte[] stopRow) { byte[] stopRow, boolean includeStopRow) {
if (state.stripeFiles.isEmpty()) { if (state.stripeFiles.isEmpty()) {
return state.level0Files; // There's just L0. return state.level0Files; // There's just L0.
} }

View File

@ -17,10 +17,11 @@
*/ */
package org.apache.hadoop.hbase.regionserver.querymatcher; package org.apache.hadoop.hbase.regionserver.querymatcher;
import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW;
import java.io.IOException; import java.io.IOException;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.KeepDeletedCells;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.Filter;
@ -45,7 +46,7 @@ public abstract class CompactionScanQueryMatcher extends ScanQueryMatcher {
protected CompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes, protected CompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes,
long readPointToUse, long oldestUnexpiredTS, long now) { long readPointToUse, long oldestUnexpiredTS, long now) {
super(HConstants.EMPTY_START_ROW, scanInfo, super(createStartKeyFromRow(EMPTY_START_ROW, scanInfo), scanInfo,
new ScanWildcardColumnTracker(scanInfo.getMinVersions(), scanInfo.getMaxVersions(), new ScanWildcardColumnTracker(scanInfo.getMinVersions(), scanInfo.getMaxVersions(),
oldestUnexpiredTS), oldestUnexpiredTS),
oldestUnexpiredTS, now); oldestUnexpiredTS, now);

View File

@ -116,7 +116,8 @@ public class LegacyScanQueryMatcher extends ScanQueryMatcher {
private LegacyScanQueryMatcher(Scan scan, ScanInfo scanInfo, ColumnTracker columns, private LegacyScanQueryMatcher(Scan scan, ScanInfo scanInfo, ColumnTracker columns,
boolean hasNullColumn, DeleteTracker deletes, ScanType scanType, long readPointToUse, boolean hasNullColumn, DeleteTracker deletes, ScanType scanType, long readPointToUse,
long earliestPutTs, long oldestUnexpiredTS, long now) { long earliestPutTs, long oldestUnexpiredTS, long now) {
super(scan.getStartRow(), scanInfo, columns, oldestUnexpiredTS, now); super(createStartKeyFromRow(scan.getStartRow(), scanInfo), scanInfo, columns, oldestUnexpiredTS,
now);
TimeRange timeRange = scan.getColumnFamilyTimeRange().get(scanInfo.getFamily()); TimeRange timeRange = scan.getColumnFamilyTimeRange().get(scanInfo.getFamily());
if (timeRange == null) { if (timeRange == null) {
this.tr = scan.getTimeRange(); this.tr = scan.getTimeRange();

View File

@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.regionserver.ScanInfo;
* Query matcher for normal user scan. * Query matcher for normal user scan.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class NormalUserScanQueryMatcher extends UserScanQueryMatcher { public abstract class NormalUserScanQueryMatcher extends UserScanQueryMatcher {
/** Keeps track of deletes */ /** Keeps track of deletes */
private final DeleteTracker deletes; private final DeleteTracker deletes;
@ -91,6 +91,16 @@ public class NormalUserScanQueryMatcher extends UserScanQueryMatcher {
RegionCoprocessorHost regionCoprocessorHost) throws IOException { RegionCoprocessorHost regionCoprocessorHost) throws IOException {
DeleteTracker deletes = instantiateDeleteTracker(regionCoprocessorHost); DeleteTracker deletes = instantiateDeleteTracker(regionCoprocessorHost);
if (scan.isReversed()) { if (scan.isReversed()) {
if (scan.includeStopRow()) {
return new NormalUserScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, deletes,
oldestUnexpiredTS, now) {
@Override
protected boolean moreRowsMayExistsAfter(int cmpToStopRow) {
return cmpToStopRow >= 0;
}
};
} else {
return new NormalUserScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, deletes, return new NormalUserScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, deletes,
oldestUnexpiredTS, now) { oldestUnexpiredTS, now) {
@ -99,9 +109,27 @@ public class NormalUserScanQueryMatcher extends UserScanQueryMatcher {
return cmpToStopRow > 0; return cmpToStopRow > 0;
} }
}; };
}
} else {
if (scan.includeStopRow()) {
return new NormalUserScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, deletes,
oldestUnexpiredTS, now) {
@Override
protected boolean moreRowsMayExistsAfter(int cmpToStopRow) {
return cmpToStopRow <= 0;
}
};
} else { } else {
return new NormalUserScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, deletes, return new NormalUserScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, deletes,
oldestUnexpiredTS, now); oldestUnexpiredTS, now) {
@Override
protected boolean moreRowsMayExistsAfter(int cmpToStopRow) {
return cmpToStopRow < 0;
}
};
}
} }
} }
} }
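Taken together, the four anonymous subclasses pin down one rule, with the comparison direction flipped for reversed scans:

    reversed  includeStopRow  moreRowsMayExistsAfter(cmpToStopRow)
    true      true            cmpToStopRow >= 0
    true      false           cmpToStopRow >  0
    false     true            cmpToStopRow <= 0
    false     false           cmpToStopRow <  0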

View File

@@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.regionserver.ScanInfo;
  * Query matcher for raw scan.
  */
 @InterfaceAudience.Private
-public class RawScanQueryMatcher extends UserScanQueryMatcher {
+public abstract class RawScanQueryMatcher extends UserScanQueryMatcher {
 
   protected RawScanQueryMatcher(Scan scan, ScanInfo scanInfo, ColumnTracker columns,
       boolean hasNullColumn, long oldestUnexpiredTS, long now) {
@@ -63,6 +63,16 @@ public class RawScanQueryMatcher extends UserScanQueryMatcher {
   public static RawScanQueryMatcher create(Scan scan, ScanInfo scanInfo, ColumnTracker columns,
       boolean hasNullColumn, long oldestUnexpiredTS, long now) {
     if (scan.isReversed()) {
+      if (scan.includeStopRow()) {
+        return new RawScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS,
+            now) {
+
+          @Override
+          protected boolean moreRowsMayExistsAfter(int cmpToStopRow) {
+            return cmpToStopRow >= 0;
+          }
+        };
+      } else {
       return new RawScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS,
           now) {
@@ -71,9 +81,27 @@ public class RawScanQueryMatcher extends UserScanQueryMatcher {
           return cmpToStopRow > 0;
         }
       };
+      }
+    } else {
+      if (scan.includeStopRow()) {
+        return new RawScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS,
+            now) {
+
+          @Override
+          protected boolean moreRowsMayExistsAfter(int cmpToStopRow) {
+            return cmpToStopRow <= 0;
+          }
+        };
     } else {
-      return new RawScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS,
-          now);
+        return new RawScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS,
+            now) {
+
+          @Override
+          protected boolean moreRowsMayExistsAfter(int cmpToStopRow) {
+            return cmpToStopRow < 0;
+          }
+        };
+      }
     }
   }
 }

View File

@@ -127,10 +127,10 @@ public abstract class ScanQueryMatcher implements ShipperListener {
   protected boolean stickyNextRow;
 
-  protected ScanQueryMatcher(byte[] startRow, ScanInfo scanInfo, ColumnTracker columns,
+  protected ScanQueryMatcher(Cell startKey, ScanInfo scanInfo, ColumnTracker columns,
       long oldestUnexpiredTS, long now) {
     this.rowComparator = scanInfo.getComparator();
-    this.startKey = CellUtil.createFirstDeleteFamilyCellOnRow(startRow, scanInfo.getFamily());
+    this.startKey = startKey;
     this.oldestUnexpiredTS = oldestUnexpiredTS;
     this.now = now;
     this.columns = columns;
@@ -345,6 +345,10 @@ public abstract class ScanQueryMatcher implements ShipperListener {
     }
   }
 
+  protected static Cell createStartKeyFromRow(byte[] startRow, ScanInfo scanInfo) {
+    return CellUtil.createFirstDeleteFamilyCellOnRow(startRow, scanInfo.getFamily());
+  }
+
   protected static DeleteTracker instantiateDeleteTracker(RegionCoprocessorHost host)
       throws IOException {
     DeleteTracker tracker = new ScanDeleteTracker();

View File

@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.util.NavigableSet;
 
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.Filter;
@@ -50,9 +51,17 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher {
   protected final TimeRange tr;
 
+  private static Cell createStartKey(Scan scan, ScanInfo scanInfo) {
+    if (scan.includeStartRow()) {
+      return createStartKeyFromRow(scan.getStartRow(), scanInfo);
+    } else {
+      return CellUtil.createLastOnRow(scan.getStartRow());
+    }
+  }
+
   protected UserScanQueryMatcher(Scan scan, ScanInfo scanInfo, ColumnTracker columns,
       boolean hasNullColumn, long oldestUnexpiredTS, long now) {
-    super(scan.getStartRow(), scanInfo, columns, oldestUnexpiredTS, now);
+    super(createStartKey(scan, scanInfo), scanInfo, columns, oldestUnexpiredTS, now);
     this.hasNullColumn = hasNullColumn;
     this.filter = scan.getFilter();
     this.stopRow = scan.getStopRow();
@@ -163,9 +172,7 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher {
   protected abstract boolean isGet();
 
-  protected boolean moreRowsMayExistsAfter(int cmpToStopRow) {
-    return cmpToStopRow < 0;
-  }
+  protected abstract boolean moreRowsMayExistsAfter(int cmpToStopRow);
 
   @Override
   public boolean moreRowsMayExistAfter(Cell cell) {

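The createStartKey change above is what implements the new includeStartRow flag: both branches build a synthetic Cell that is used only as the scanner's initial seek key. A minimal sketch of how the two keys sort relative to the start row's own cells within the scanned store (the row and family values here are placeholders, not from the patch):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StartKeySketch {
      public static void main(String[] args) {
        byte[] row = Bytes.toBytes("row-001");
        byte[] family = Bytes.toBytes("cf");
        // Sorts before every real cell of "row-001" in this store
        // (delete-family type at the latest timestamp), so an inclusive
        // start makes the initial seek land on the start row itself.
        Cell inclusiveStart = CellUtil.createFirstDeleteFamilyCellOnRow(row, family);
        // Sorts after every real cell of "row-001", so an exclusive start
        // makes the scanner resume at the first cell of the next row.
        Cell exclusiveStart = CellUtil.createLastOnRow(row);
        System.out.println(inclusiveStart + " vs " + exclusiveStart);
      }
    }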
View File

@@ -120,7 +120,7 @@ public abstract class AbstractTestAsyncTableScan {
   public void testScanNoStopKey() throws Exception {
     int start = 345;
     List<Result> results =
-        doScan(createScan().setStartRow(Bytes.toBytes(String.format("%03d", start))));
+        doScan(createScan().withStartRow(Bytes.toBytes(String.format("%03d", start))));
     assertEquals(COUNT - start, results.size());
     IntStream.range(0, COUNT - start).forEach(i -> assertResultEquals(results.get(i), start + i));
   }
@@ -129,44 +129,66 @@ public abstract class AbstractTestAsyncTableScan {
   public void testReverseScanNoStopKey() throws Exception {
     int start = 765;
     List<Result> results = doScan(
-        createScan().setStartRow(Bytes.toBytes(String.format("%03d", start))).setReversed(true));
+        createScan().withStartRow(Bytes.toBytes(String.format("%03d", start))).setReversed(true));
     assertEquals(start + 1, results.size());
     IntStream.range(0, start + 1).forEach(i -> assertResultEquals(results.get(i), start - i));
   }
 
-  private void testScan(int start, int stop) throws Exception {
-    List<Result> results =
-        doScan(createScan().setStartRow(Bytes.toBytes(String.format("%03d", start)))
-            .setStopRow(Bytes.toBytes(String.format("%03d", stop))));
-    assertEquals(stop - start, results.size());
-    IntStream.range(0, stop - start).forEach(i -> assertResultEquals(results.get(i), start + i));
+  private void testScan(int start, boolean startInclusive, int stop, boolean stopInclusive)
+      throws Exception {
+    List<Result> results = doScan(
+        createScan().withStartRow(Bytes.toBytes(String.format("%03d", start)), startInclusive)
+            .withStopRow(Bytes.toBytes(String.format("%03d", stop)), stopInclusive));
+    int actualStart = startInclusive ? start : start + 1;
+    int actualStop = stopInclusive ? stop + 1 : stop;
+    assertEquals(actualStop - actualStart, results.size());
+    IntStream.range(0, actualStop - actualStart)
+        .forEach(i -> assertResultEquals(results.get(i), actualStart + i));
   }
 
-  private void testReversedScan(int start, int stop) throws Exception {
-    List<Result> results =
-        doScan(createScan().setStartRow(Bytes.toBytes(String.format("%03d", start)))
-            .setStopRow(Bytes.toBytes(String.format("%03d", stop))).setReversed(true));
-    assertEquals(start - stop, results.size());
-    IntStream.range(0, start - stop).forEach(i -> assertResultEquals(results.get(i), start - i));
+  private void testReversedScan(int start, boolean startInclusive, int stop, boolean stopInclusive)
+      throws Exception {
+    List<Result> results = doScan(createScan()
+        .withStartRow(Bytes.toBytes(String.format("%03d", start)), startInclusive)
+        .withStopRow(Bytes.toBytes(String.format("%03d", stop)), stopInclusive).setReversed(true));
+    int actualStart = startInclusive ? start : start - 1;
+    int actualStop = stopInclusive ? stop - 1 : stop;
+    assertEquals(actualStart - actualStop, results.size());
+    IntStream.range(0, actualStart - actualStop)
+        .forEach(i -> assertResultEquals(results.get(i), actualStart - i));
   }
 
   @Test
   public void testScanWithStartKeyAndStopKey() throws Exception {
-    testScan(345, 567);
+    testScan(1, true, 998, false); // from first region to last region
+    testScan(123, true, 345, true);
+    testScan(234, true, 456, false);
+    testScan(345, false, 567, true);
+    testScan(456, false, 678, false);
   }
 
   @Test
   public void testReversedScanWithStartKeyAndStopKey() throws Exception {
-    testReversedScan(765, 543);
+    testReversedScan(998, true, 1, false); // from last region to first region
+    testReversedScan(543, true, 321, true);
+    testReversedScan(654, true, 432, false);
+    testReversedScan(765, false, 543, true);
+    testReversedScan(876, false, 654, false);
   }
 
   @Test
   public void testScanAtRegionBoundary() throws Exception {
-    testScan(222, 333);
+    testScan(222, true, 333, true);
+    testScan(333, true, 444, false);
+    testScan(444, false, 555, true);
+    testScan(555, false, 666, false);
   }
 
   @Test
   public void testReversedScanAtRegionBoundary() throws Exception {
-    testScan(222, 333);
+    testReversedScan(333, true, 222, true);
+    testReversedScan(444, true, 333, false);
+    testReversedScan(555, false, 444, true);
+    testReversedScan(666, false, 555, false);
   }
 }
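
These tests exercise the new client-side API surface of this change: each boundary of a scan range can now be marked inclusive or exclusive independently. A short usage sketch (the row values are made up for illustration):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanRangeSketch {
      public static void main(String[] args) {
        // Rows in ("345", "567"]: exclusive start, inclusive stop.
        Scan scan = new Scan()
            .withStartRow(Bytes.toBytes("345"), false)
            .withStopRow(Bytes.toBytes("567"), true);
        // The one-argument forms keep the old setStartRow/setStopRow
        // semantics: inclusive start, exclusive stop.
        Scan defaultScan = new Scan()
            .withStartRow(Bytes.toBytes("345"))
            .withStopRow(Bytes.toBytes("567"));
        System.out.println(scan + "\n" + defaultScan);
      }
    }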

View File

@@ -25,6 +25,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Queue;
+import java.util.concurrent.TimeUnit;
 import java.util.function.Supplier;
 
 import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -92,13 +93,17 @@ public class TestRawAsyncTableScan extends AbstractTestAsyncTableScan {
     }
   }
 
-  @Parameter
+  @Parameter(0)
+  public String scanType;
+
+  @Parameter(1)
   public Supplier<Scan> scanCreater;
 
-  @Parameters
+  @Parameters(name = "{index}: type={0}")
   public static List<Object[]> params() {
-    return Arrays.asList(new Supplier<?>[] { TestRawAsyncTableScan::createNormalScan },
-        new Supplier<?>[] { TestRawAsyncTableScan::createBatchScan });
+    Supplier<Scan> normal = TestRawAsyncTableScan::createNormalScan;
+    Supplier<Scan> batch = TestRawAsyncTableScan::createBatchScan;
+    return Arrays.asList(new Object[] { "normal", normal }, new Object[] { "batch", batch });
   }
 
   private static Scan createNormalScan() {
@@ -117,7 +122,10 @@ public class TestRawAsyncTableScan extends AbstractTestAsyncTableScan {
   @Override
   protected List<Result> doScan(Scan scan) throws Exception {
     SimpleRawScanResultConsumer scanConsumer = new SimpleRawScanResultConsumer();
-    ASYNC_CONN.getRawTable(TABLE_NAME).scan(scan, scanConsumer);
+    RawAsyncTable table = ASYNC_CONN.getRawTable(TABLE_NAME);
+    table.setScanTimeout(1, TimeUnit.HOURS);
+    table.setReadRpcTimeout(1, TimeUnit.HOURS);
+    table.scan(scan, scanConsumer);
     List<Result> results = new ArrayList<>();
     for (Result result; (result = scanConsumer.take()) != null;) {
       results.add(result);

View File

@@ -89,15 +89,15 @@ public class TestStripeStoreFileManager {
     MockStoreFile sf = createFile();
     manager.insertNewFiles(al(sf));
     assertEquals(1, manager.getStorefileCount());
-    Collection<StoreFile> filesForGet = manager.getFilesForScanOrGet(true, KEY_A, KEY_A);
+    Collection<StoreFile> filesForGet = manager.getFilesForScan(KEY_A, true, KEY_A, true);
     assertEquals(1, filesForGet.size());
     assertTrue(filesForGet.contains(sf));
 
     // Add some stripes and make sure we get this file for every stripe.
     manager.addCompactionResults(al(), al(createFile(OPEN_KEY, KEY_B),
         createFile(KEY_B, OPEN_KEY)));
-    assertTrue(manager.getFilesForScanOrGet(true, KEY_A, KEY_A).contains(sf));
-    assertTrue(manager.getFilesForScanOrGet(true, KEY_C, KEY_C).contains(sf));
+    assertTrue(manager.getFilesForScan(KEY_A, true, KEY_A, true).contains(sf));
+    assertTrue(manager.getFilesForScan(KEY_C, true, KEY_C, true).contains(sf));
   }
 
   @Test
@@ -290,10 +290,9 @@ public class TestStripeStoreFileManager {
     verifyGetAndScanScenario(manager, keyAfter(KEY_B), keyAfter(KEY_C), sf0, sfC, sfD);
   }
 
-  private void verifyGetAndScanScenario(StripeStoreFileManager manager,
-      byte[] start, byte[] end, StoreFile... results) throws Exception {
-    verifyGetOrScanScenario(manager, true, start, end, results);
-    verifyGetOrScanScenario(manager, false, start, end, results);
+  private void verifyGetAndScanScenario(StripeStoreFileManager manager, byte[] start, byte[] end,
+      StoreFile... results) throws Exception {
+    verifyGetOrScanScenario(manager, start, end, results);
   }
 
   @Test
@@ -548,16 +547,16 @@ public class TestStripeStoreFileManager {
     verifyAllFiles(manager, allFiles); // must have the same files.
   }
 
-  private void verifyGetOrScanScenario(StripeStoreFileManager manager, boolean isGet,
-      byte[] start, byte[] end, StoreFile... results) throws Exception {
-    verifyGetOrScanScenario(manager, isGet, start, end, Arrays.asList(results));
+  private void verifyGetOrScanScenario(StripeStoreFileManager manager, byte[] start, byte[] end,
+      StoreFile... results) throws Exception {
+    verifyGetOrScanScenario(manager, start, end, Arrays.asList(results));
   }
 
-  private void verifyGetOrScanScenario(StripeStoreFileManager manager, boolean isGet,
-      byte[] start, byte[] end, Collection<StoreFile> results) throws Exception {
+  private void verifyGetOrScanScenario(StripeStoreFileManager manager, byte[] start, byte[] end,
+      Collection<StoreFile> results) throws Exception {
     start = start != null ? start : HConstants.EMPTY_START_ROW;
     end = end != null ? end : HConstants.EMPTY_END_ROW;
-    Collection<StoreFile> sfs = manager.getFilesForScanOrGet(isGet, start, end);
+    Collection<StoreFile> sfs = manager.getFilesForScan(start, true, end, false);
     assertEquals(results.size(), sfs.size());
     for (StoreFile result : results) {
       assertTrue(sfs.contains(result));
@@ -566,7 +565,7 @@ public class TestStripeStoreFileManager {
   private void verifyAllFiles(
       StripeStoreFileManager manager, Collection<StoreFile> results) throws Exception {
-    verifyGetOrScanScenario(manager, false, null, null, results);
+    verifyGetOrScanScenario(manager, null, null, results);
   }
 
   // TODO: replace with Mockito?
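
The reworked getFilesForScan(start, includeStart, stop, includeStop) calls above carry the same inclusive/exclusive flags through the store-file selection path. The row-membership rule those flags encode, written out as a standalone sketch (an illustration, not code from the patch; empty arrays stand for the unbounded EMPTY_START_ROW / EMPTY_END_ROW markers):

    import org.apache.hadoop.hbase.util.Bytes;

    public class RangeRuleSketch {
      // True when `row` falls inside the scan range described by the four
      // boundary parameters, matching the semantics the tests above assert.
      static boolean inRange(byte[] row, byte[] start, boolean includeStart,
          byte[] stop, boolean includeStop) {
        boolean afterStart = start.length == 0
            || (includeStart ? Bytes.compareTo(row, start) >= 0
                : Bytes.compareTo(row, start) > 0);
        boolean beforeStop = stop.length == 0
            || (includeStop ? Bytes.compareTo(row, stop) <= 0
                : Bytes.compareTo(row, stop) < 0);
        return afterStart && beforeStop;
      }

      public static void main(String[] args) {
        byte[] b = Bytes.toBytes("333");
        // "333" is excluded when the stop row "333" is exclusive...
        System.out.println(inRange(b, Bytes.toBytes("222"), true, b, false)); // false
        // ...and included when it is inclusive.
        System.out.println(inRange(b, Bytes.toBytes("222"), true, b, true)); // true
      }
    }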