HBASE-26545 Implement tracing of scan
* on `AsyncTable`, both `scan` and `scanAll` methods should result in `SCAN` table operations.
* the span of the `SCAN` table operation should have children representing all the RPC calls
  involved in servicing the scan.
* when a user provides a custom implementation of `AdvancedScanResultConsumer`, any spans emitted
  from the callback methods should also be tied to the span that represents the `SCAN` table
  operation. This is easily done because these callbacks are executed on the RPC thread.
* when a user provides a custom implementation of `ScanResultConsumer`, any spans emitted from the
  callback methods should also be tied to the span that represents the `SCAN` table operation.
  This is accomplished by carefully passing the span instance around after it is created.

Signed-off-by: Andrew Purtell <apurtell@apache.org>
Signed-off-by: Duo Zhang <zhangduo@apache.org>
This commit is contained in:
parent 69ea6f579f
commit 235308d8bf
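Before the per-file diffs, a rough sketch of what the last bullet buys a user. This is
hypothetical application code, not part of this patch: a custom `ScanResultConsumer` can open its
own spans inside the callbacks, and they nest under the `SCAN` table-operation span because the
client makes that span current before invoking them. The class and tracer names here are
illustrative.

import io.opentelemetry.api.GlobalOpenTelemetry;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.Tracer;
import io.opentelemetry.context.Scope;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ScanResultConsumer;

// Hypothetical user code, not part of this patch.
public class TracedScanConsumer implements ScanResultConsumer {
  // The tracer name "example-app" is illustrative.
  private static final Tracer TRACER = GlobalOpenTelemetry.getTracer("example-app");

  @Override
  public boolean onNext(Result result) {
    // Because the client re-activates the SCAN span before invoking this callback,
    // Span.current() is the SCAN span, so this child span nests under it.
    Span child = TRACER.spanBuilder("process-row").startSpan();
    try (Scope ignored = child.makeCurrent()) {
      // application-specific handling of `result` would go here
      return true; // keep scanning
    } finally {
      child.end();
    }
  }

  @Override
  public void onError(Throwable error) {
    // Also runs with the SCAN span current, per the AsyncTableImpl changes below.
    Span.current().recordException(error);
  }

  @Override
  public void onComplete() {
    Span.current().addEvent("scan complete");
  }
}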
org/apache/hadoop/hbase/client/AsyncClientScanner.java

@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -28,6 +28,9 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.isRemote;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead;
 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
 
+import io.opentelemetry.api.trace.Span;
+import io.opentelemetry.api.trace.StatusCode;
+import io.opentelemetry.context.Scope;
 import java.io.IOException;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.TimeUnit;
@@ -35,7 +38,9 @@ import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+import org.apache.hadoop.hbase.client.trace.TableOperationSpanBuilder;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hbase.thirdparty.io.netty.util.Timer;
@@ -85,6 +90,8 @@ class AsyncClientScanner {
 
   private final ScanResultCache resultCache;
 
+  private final Span span;
+
   public AsyncClientScanner(Scan scan, AdvancedScanResultConsumer consumer, TableName tableName,
       AsyncConnectionImpl conn, Timer retryTimer, long pauseNs, long pauseForCQTBENs,
       int maxAttempts, long scanTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) {
@@ -112,6 +119,18 @@ class AsyncClientScanner {
     } else {
       this.scanMetrics = null;
     }
+
+    /*
+     * Assumes that the `start()` method is called immediately after construction. If this is no
+     * longer the case, for tracing correctness, we should move the start of the span into the
+     * `start()` method. The cost of doing so would be making access to the `span` safe for
+     * concurrent threads.
+     */
+    span = new TableOperationSpanBuilder(conn).setTableName(tableName).setOperation(scan).build();
+    if (consumer instanceof AsyncTableResultScanner) {
+      AsyncTableResultScanner scanner = (AsyncTableResultScanner) consumer;
+      scanner.setSpan(span);
+    }
   }
 
   private static final class OpenScannerResponse {
@@ -140,6 +159,7 @@ class AsyncClientScanner {
 
   private CompletableFuture<OpenScannerResponse> callOpenScanner(HBaseRpcController controller,
       HRegionLocation loc, ClientService.Interface stub) {
+    try (Scope ignored = span.makeCurrent()) {
       boolean isRegionServerRemote = isRemote(loc.getHostname());
       incRPCCallsMetrics(scanMetrics, isRegionServerRemote);
       if (openScannerTries.getAndIncrement() > 1) {
@@ -147,20 +167,28 @@ class AsyncClientScanner {
       }
       CompletableFuture<OpenScannerResponse> future = new CompletableFuture<>();
       try {
-        ScanRequest request = RequestConverter.buildScanRequest(loc.getRegion().getRegionName(), scan,
-          scan.getCaching(), false);
+        ScanRequest request = RequestConverter.buildScanRequest(loc.getRegion().getRegionName(),
+          scan, scan.getCaching(), false);
         stub.scan(controller, request, resp -> {
+          try (Scope ignored1 = span.makeCurrent()) {
             if (controller.failed()) {
-              future.completeExceptionally(controller.getFailed());
+              final IOException e = controller.getFailed();
+              future.completeExceptionally(e);
+              TraceUtil.setError(span, e);
+              span.end();
               return;
             }
-            future.complete(new OpenScannerResponse(loc, isRegionServerRemote, stub, controller, resp));
+            future.complete(
+              new OpenScannerResponse(loc, isRegionServerRemote, stub, controller, resp));
+          }
         });
       } catch (IOException e) {
+        // span is closed by listener attached to the Future in `openScanner()`
        future.completeExceptionally(e);
       }
       return future;
+    }
   }
 
   private void startScan(OpenScannerResponse resp) {
     addListener(
@@ -173,26 +201,40 @@ class AsyncClientScanner {
         .pauseForCQTBE(pauseForCQTBENs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts)
         .startLogErrorsCnt(startLogErrorsCnt).start(resp.controller, resp.resp),
       (hasMore, error) -> {
+        try (Scope ignored = span.makeCurrent()) {
           if (error != null) {
+            try {
               consumer.onError(error);
               return;
+            } finally {
+              TraceUtil.setError(span, error);
+              span.end();
+            }
           }
           if (hasMore) {
            openScanner();
           } else {
+            try {
               consumer.onComplete();
+            } finally {
+              span.setStatus(StatusCode.OK);
+              span.end();
+            }
           }
+        }
       });
   }
 
   private CompletableFuture<OpenScannerResponse> openScanner(int replicaId) {
+    try (Scope ignored = span.makeCurrent()) {
       return conn.callerFactory.<OpenScannerResponse> single().table(tableName)
         .row(scan.getStartRow()).replicaId(replicaId).locateType(getLocateType(scan))
-        .priority(scan.getPriority())
-        .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
-        .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS)
-        .pauseForCQTBE(pauseForCQTBENs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts)
-        .startLogErrorsCnt(startLogErrorsCnt).action(this::callOpenScanner).call();
+        .priority(scan.getPriority()).rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
+        .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS)
+        .pause(pauseNs, TimeUnit.NANOSECONDS).pauseForCQTBE(pauseForCQTBENs, TimeUnit.NANOSECONDS)
+        .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt)
+        .action(this::callOpenScanner).call();
+    }
   }
 
   private long getPrimaryTimeoutNs() {
@@ -206,15 +248,24 @@ class AsyncClientScanner {
     addListener(timelineConsistentRead(conn.getLocator(), tableName, scan, scan.getStartRow(),
       getLocateType(scan), this::openScanner, rpcTimeoutNs, getPrimaryTimeoutNs(), retryTimer,
       conn.getConnectionMetrics()), (resp, error) -> {
+        try (Scope ignored = span.makeCurrent()) {
           if (error != null) {
+            try {
               consumer.onError(error);
               return;
+            } finally {
+              TraceUtil.setError(span, error);
+              span.end();
+            }
           }
           startScan(resp);
+        }
       });
   }
 
   public void start() {
+    try (Scope ignored = span.makeCurrent()) {
       openScanner();
+    }
   }
 }
org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java

@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -28,6 +28,8 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.translateException;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.updateResultsMetrics;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.updateServerSideMetrics;
 
+import io.opentelemetry.context.Context;
+import io.opentelemetry.context.Scope;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
@@ -170,8 +172,8 @@ class AsyncScanSingleRegionRpcRetryingCaller {
 
   private void preCheck() {
     Preconditions.checkState(Thread.currentThread() == callerThread,
-      "The current thread is %s, expected thread is %s, " +
-        "you should not call this method outside onNext or onHeartbeat",
+      "The current thread is %s, expected thread is %s, "
+        + "you should not call this method outside onNext or onHeartbeat",
       Thread.currentThread(), callerThread);
     Preconditions.checkState(state.equals(ScanControllerState.INITIALIZED),
       "Invalid Stopper state %s", state);
@@ -352,9 +354,9 @@ class AsyncScanSingleRegionRpcRetryingCaller {
     ScanRequest req = RequestConverter.buildScanRequest(this.scannerId, 0, true, false);
     stub.scan(controller, req, resp -> {
       if (controller.failed()) {
-        LOG.warn("Call to " + loc.getServerName() + " for closing scanner id = " + scannerId +
-          " for " + loc.getRegion().getEncodedName() + " of " +
-          loc.getRegion().getTable() + " failed, ignore, probably already closed",
+        LOG.warn("Call to " + loc.getServerName() + " for closing scanner id = " + scannerId
+          + " for " + loc.getRegion().getEncodedName() + " of " + loc.getRegion().getTable()
+          + " failed, ignore, probably already closed",
           controller.getFailed());
       }
     });
@@ -392,16 +394,16 @@ class AsyncScanSingleRegionRpcRetryingCaller {
   private void onError(Throwable error) {
     error = translateException(error);
     if (tries > startLogErrorsCnt) {
-      LOG.warn("Call to " + loc.getServerName() + " for scanner id = " + scannerId + " for " +
-        loc.getRegion().getEncodedName() + " of " + loc.getRegion().getTable() +
-        " failed, , tries = " + tries + ", maxAttempts = " + maxAttempts + ", timeout = " +
-        TimeUnit.NANOSECONDS.toMillis(scanTimeoutNs) + " ms, time elapsed = " + elapsedMs() +
-        " ms",
+      LOG.warn("Call to " + loc.getServerName() + " for scanner id = " + scannerId + " for "
+        + loc.getRegion().getEncodedName() + " of " + loc.getRegion().getTable()
+        + " failed, , tries = " + tries + ", maxAttempts = " + maxAttempts + ", timeout = "
+        + TimeUnit.NANOSECONDS.toMillis(scanTimeoutNs) + " ms, time elapsed = " + elapsedMs()
+        + " ms",
        error);
     }
-    boolean scannerClosed =
-      error instanceof UnknownScannerException || error instanceof NotServingRegionException ||
-        error instanceof RegionServerStoppedException || error instanceof ScannerResetException;
+    boolean scannerClosed = error instanceof UnknownScannerException
+      || error instanceof NotServingRegionException
+      || error instanceof RegionServerStoppedException || error instanceof ScannerResetException;
     RetriesExhaustedException.ThrowableWithExtraContext qt =
       new RetriesExhaustedException.ThrowableWithExtraContext(error,
         EnvironmentEdgeManager.currentTime(), "");
@@ -573,7 +575,12 @@ class AsyncScanSingleRegionRpcRetryingCaller {
     resetController(controller, callTimeoutNs, priority);
     ScanRequest req = RequestConverter.buildScanRequest(scannerId, scan.getCaching(), false,
       nextCallSeq, scan.isScanMetricsEnabled(), false, scan.getLimit());
-    stub.scan(controller, req, resp -> onComplete(controller, resp));
+    final Context context = Context.current();
+    stub.scan(controller, req, resp -> {
+      try (Scope ignored = context.makeCurrent()) {
+        onComplete(controller, resp);
+      }
+    });
   }
 
   private void next() {
org/apache/hadoop/hbase/client/AsyncTableImpl.java

@@ -18,8 +18,11 @@
 package org.apache.hadoop.hbase.client;
 
 import static java.util.stream.Collectors.toList;
 
 import com.google.protobuf.RpcChannel;
+import io.opentelemetry.api.trace.Span;
+import io.opentelemetry.context.Context;
+import io.opentelemetry.context.Scope;
 import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
@@ -177,8 +180,7 @@ class AsyncTableImpl implements AsyncTable<ScanResultConsumer> {
   public CheckAndMutateWithFilterBuilder checkAndMutate(byte[] row, Filter filter) {
     return new CheckAndMutateWithFilterBuilder() {
 
-      private final CheckAndMutateWithFilterBuilder builder =
-        rawTable.checkAndMutate(row, filter);
+      private final CheckAndMutateWithFilterBuilder builder = rawTable.checkAndMutate(row, filter);
 
       @Override
       public CheckAndMutateWithFilterBuilder timeRange(TimeRange timeRange) {
@@ -209,10 +211,9 @@ class AsyncTableImpl implements AsyncTable<ScanResultConsumer> {
   }
 
   @Override
-  public List<CompletableFuture<CheckAndMutateResult>> checkAndMutate(
-      List<CheckAndMutate> checkAndMutates) {
-    return rawTable.checkAndMutate(checkAndMutates).stream()
-      .map(this::wrap).collect(toList());
+  public List<CompletableFuture<CheckAndMutateResult>>
+      checkAndMutate(List<CheckAndMutate> checkAndMutates) {
+    return rawTable.checkAndMutate(checkAndMutates).stream().map(this::wrap).collect(toList());
   }
 
   @Override
@@ -231,7 +232,10 @@ class AsyncTableImpl implements AsyncTable<ScanResultConsumer> {
   }
 
   private void scan0(Scan scan, ScanResultConsumer consumer) {
-    try (ResultScanner scanner = getScanner(scan)) {
+    Span span = null;
+    try (AsyncTableResultScanner scanner = rawTable.getScanner(scan)) {
+      span = scanner.getSpan();
+      try (Scope ignored = span.makeCurrent()) {
         consumer.onScanMetricsCreated(scanner.getScanMetrics());
         for (Result result; (result = scanner.next()) != null;) {
           if (!consumer.onNext(result)) {
@@ -239,14 +243,18 @@ class AsyncTableImpl implements AsyncTable<ScanResultConsumer> {
           }
         }
         consumer.onComplete();
+      }
     } catch (IOException e) {
+      try (Scope ignored = span.makeCurrent()) {
         consumer.onError(e);
+      }
     }
   }
 
   @Override
   public void scan(Scan scan, ScanResultConsumer consumer) {
-    pool.execute(() -> scan0(scan, consumer));
+    final Context context = Context.current();
+    pool.execute(context.wrap(() -> scan0(scan, consumer)));
   }
 
   @Override
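The `Context.current()` / `context.wrap(...)` idiom used in `scan` above is plain OpenTelemetry
context propagation across an executor boundary: capture the submitting thread's context, then
re-establish it for the task on the pool thread so `Span.current()` still resolves to the SCAN
span inside `scan0`. A minimal self-contained sketch of the same idiom, assuming nothing from the
patch (the class name and tracer name are illustrative):

import io.opentelemetry.api.GlobalOpenTelemetry;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.Tracer;
import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ContextWrapDemo {
  public static void main(String[] args) {
    Tracer tracer = GlobalOpenTelemetry.getTracer("demo"); // tracer name is illustrative
    ExecutorService pool = Executors.newSingleThreadExecutor();
    Span parent = tracer.spanBuilder("SCAN").startSpan();
    try (Scope ignored = parent.makeCurrent()) {
      // Capture the submitting thread's context (which carries `parent`) ...
      Context context = Context.current();
      // ... and re-establish it on the pool thread for the duration of the task.
      // Without wrap(), Span.current() on the pool thread would be the invalid no-op span.
      pool.execute(context.wrap(() -> Span.current().addEvent("on pool thread")));
    } finally {
      parent.end();
      pool.shutdown();
    }
  }
}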
org/apache/hadoop/hbase/client/AsyncTableResultScanner.java

@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
 
 import static org.apache.hadoop.hbase.client.ConnectionUtils.calcEstimatedSize;
 
+import io.opentelemetry.api.trace.Span;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayDeque;
@@ -58,6 +59,9 @@ class AsyncTableResultScanner implements ResultScanner, AdvancedScanResultConsumer {
 
   private ScanResumer resumer;
 
+  // Used to pass the span instance to the `AsyncTableImpl` from its underlying `rawAsyncTable`.
+  private Span span = null;
+
   public AsyncTableResultScanner(TableName tableName, Scan scan, long maxCacheSize) {
     this.tableName = tableName;
     this.maxCacheSize = maxCacheSize;
@@ -71,14 +75,22 @@ class AsyncTableResultScanner implements ResultScanner, AdvancedScanResultConsumer {
 
   private void stopPrefetch(ScanController controller) {
     if (LOG.isDebugEnabled()) {
-      LOG.debug("{} stop prefetching when scanning {} as the cache size {}" +
-          " is greater than the maxCacheSize {}",
-        String.format("0x%x", System.identityHashCode(this)), tableName, cacheSize,
-        maxCacheSize);
+      LOG.debug(
+        "{} stop prefetching when scanning {} as the cache size {}"
+          + " is greater than the maxCacheSize {}",
+        String.format("0x%x", System.identityHashCode(this)), tableName, cacheSize, maxCacheSize);
     }
     resumer = controller.suspend();
   }
 
+  Span getSpan() {
+    return span;
+  }
+
+  void setSpan(final Span span) {
+    this.span = span;
+  }
+
   @Override
   public synchronized void onNext(Result[] results, ScanController controller) {
     assert results.length > 0;
org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java

@@ -18,6 +18,9 @@
 package org.apache.hadoop.hbase.client;
 
 import static org.apache.hadoop.hbase.client.ConnectionUtils.calcEstimatedSize;
 
+import io.opentelemetry.context.Context;
+import io.opentelemetry.context.Scope;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.Queue;
@@ -36,14 +39,13 @@ import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
- * ClientAsyncPrefetchScanner implements async scanner behaviour.
- * Specifically, the cache used by this scanner is a concurrent queue which allows both
- * the producer (hbase client) and consumer (application) to access the queue in parallel.
- * The number of rows returned in a prefetch is defined by the caching factor and the result size
- * factor.
- * This class allocates a buffer cache, whose size is a function of both factors.
- * The prefetch is invoked when the cache is halffilled, instead of waiting for it to be empty.
- * This is defined in the method {@link ClientAsyncPrefetchScanner#prefetchCondition()}.
+ * ClientAsyncPrefetchScanner implements async scanner behaviour. Specifically, the cache used by
+ * this scanner is a concurrent queue which allows both the producer (hbase client) and consumer
+ * (application) to access the queue in parallel. The number of rows returned in a prefetch is
+ * defined by the caching factor and the result size factor. This class allocates a buffer cache,
+ * whose size is a function of both factors. The prefetch is invoked when the cache is half-filled,
+ * instead of waiting for it to be empty. This is defined in the method
+ * {@link ClientAsyncPrefetchScanner#prefetchCondition()}.
  */
 @InterfaceAudience.Private
 public class ClientAsyncPrefetchScanner extends ClientSimpleScanner {
@@ -66,7 +68,9 @@ public class ClientAsyncPrefetchScanner extends ClientSimpleScanner {
     super(configuration, scan, name, connection, rpcCallerFactory, rpcControllerFactory, pool,
       replicaCallTimeoutMicroSecondScan);
     exceptionsQueue = new ConcurrentLinkedQueue<>();
-    Threads.setDaemonThreadRunning(new Thread(new PrefetchRunnable()), name + ".asyncPrefetcher");
+    final Context context = Context.current();
+    final Runnable runnable = context.wrap(new PrefetchRunnable());
+    Threads.setDaemonThreadRunning(new Thread(runnable), name + ".asyncPrefetcher");
   }
 
   void setPrefetchListener(Consumer<Boolean> prefetchListener) {
@@ -88,7 +92,7 @@ public class ClientAsyncPrefetchScanner extends ClientSimpleScanner {
 
   @Override
   public Result next() throws IOException {
-    try {
+    try (Scope ignored = span.makeCurrent()) {
       lock.lock();
       while (cache.isEmpty()) {
         handleException();
@@ -98,6 +102,7 @@ public class ClientAsyncPrefetchScanner extends ClientSimpleScanner {
         try {
           notEmpty.await();
         } catch (InterruptedException e) {
+          span.recordException(e);
           throw new InterruptedIOException("Interrupted when wait to load cache");
         }
       }
@@ -171,6 +176,7 @@ public class ClientAsyncPrefetchScanner extends ClientSimpleScanner {
         succeed = true;
       } catch (Exception e) {
         exceptionsQueue.add(e);
+        span.recordException(e);
       } finally {
         notEmpty.signalAll();
         lock.unlock();
@@ -180,7 +186,6 @@ public class ClientAsyncPrefetchScanner extends ClientSimpleScanner {
         }
       }
     }
-
   }
 
 }
org/apache/hadoop/hbase/client/ClientScanner.java

@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -21,6 +21,9 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.calcEstimatedSize;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.createScanResultCache;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.incRegionCountMetrics;
 
+import io.opentelemetry.api.trace.Span;
+import io.opentelemetry.api.trace.StatusCode;
+import io.opentelemetry.context.Scope;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayDeque;
@@ -40,13 +43,14 @@ import org.apache.hadoop.hbase.exceptions.ScannerResetException;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.LeaseException;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-
 /**
  * Implements the scanner interface for the HBase client. If there are multiple regions in a table,
  * this scanner will iterate through them all.
@@ -76,6 +80,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
   protected RpcRetryingCaller<Result[]> caller;
   protected RpcControllerFactory rpcControllerFactory;
   protected Configuration conf;
+  protected final Span span;
   // The timeout on the primary. Applicable if there are multiple replicas for a region
   // In that case, we will only wait for this much timeout on the primary before going
   // to the replicas and trying the same scan. Note that the retries will still happen
@@ -92,7 +97,6 @@ public abstract class ClientScanner extends AbstractClientScanner {
    * @param scan {@link Scan} to use in this scanner
    * @param tableName The table that we wish to scan
    * @param connection Connection identifying the cluster
-   * @throws IOException
    */
   public ClientScanner(final Configuration conf, final Scan scan, final TableName tableName,
       ClusterConnection connection, RpcRetryingCallerFactory rpcFactory,
@@ -134,14 +138,14 @@ public abstract class ClientScanner extends AbstractClientScanner {
     this.rpcControllerFactory = controllerFactory;
 
     this.conf = conf;
+    this.span = Span.current();
 
     this.scanResultCache = createScanResultCache(scan);
     initCache();
   }
 
   protected final int getScanReplicaId() {
-    return scan.getReplicaId() >= RegionReplicaUtil.DEFAULT_REPLICA_ID ? scan.getReplicaId() :
-      RegionReplicaUtil.DEFAULT_REPLICA_ID;
+    return Math.max(scan.getReplicaId(), RegionReplicaUtil.DEFAULT_REPLICA_ID);
   }
 
   protected ClusterConnection getConnection() {
@@ -238,8 +242,8 @@ public abstract class ClientScanner extends AbstractClientScanner {
     if (LOG.isDebugEnabled() && this.currentRegion != null) {
       // Only worth logging if NOT first region in scan.
       LOG.debug(
-        "Advancing internal scanner to startKey at '" + Bytes.toStringBinary(scan.getStartRow()) +
-          "', " + (scan.includeStartRow() ? "inclusive" : "exclusive"));
+        "Advancing internal scanner to startKey at '" + Bytes.toStringBinary(scan.getStartRow())
+          + "', " + (scan.includeStartRow() ? "inclusive" : "exclusive"));
     }
     // clear the current region, we will set a new value to it after the first call of the new
     // callable.
@@ -331,8 +335,8 @@ public abstract class ClientScanner extends AbstractClientScanner {
     // old time we always return empty result for a open scanner operation so we add a check here to
     // keep compatible with the old logic. Should remove the isOpenScanner in the future.
     // 2. Server tells us that it has no more results for this region.
-    return (values.length == 0 && !callable.isHeartbeatMessage()) ||
-      callable.moreResultsInRegion() == MoreResults.NO;
+    return (values.length == 0 && !callable.isHeartbeatMessage())
+      || callable.moreResultsInRegion() == MoreResults.NO;
   }
 
   private void closeScannerIfExhausted(boolean exhausted) throws IOException {
@@ -362,10 +366,10 @@ public abstract class ClientScanner extends AbstractClientScanner {
     // If exception is any but the list below throw it back to the client; else setup
     // the scanner and retry.
     Throwable cause = e.getCause();
-    if ((cause != null && cause instanceof NotServingRegionException) ||
-      (cause != null && cause instanceof RegionServerStoppedException) ||
-      e instanceof OutOfOrderScannerNextException || e instanceof UnknownScannerException ||
-      e instanceof ScannerResetException || e instanceof LeaseException) {
+    if ((cause != null && cause instanceof NotServingRegionException)
+      || (cause != null && cause instanceof RegionServerStoppedException)
+      || e instanceof OutOfOrderScannerNextException || e instanceof UnknownScannerException
+      || e instanceof ScannerResetException || e instanceof LeaseException) {
       // Pass. It is easier writing the if loop test as list of what is allowed rather than
       // as a list of what is not allowed... so if in here, it means we do not throw.
       if (retriesLeft <= 0) {
@@ -489,8 +493,8 @@ public abstract class ClientScanner extends AbstractClientScanner {
       // processing of the scan is taking a long time server side. Rather than continue to
       // loop until a limit (e.g. size or caching) is reached, break out early to avoid causing
       // unnecesary delays to the caller
-      LOG.trace("Heartbeat message received and cache contains Results. " +
-        "Breaking out of scan loop");
+      LOG.trace("Heartbeat message received and cache contains Results. "
+        + "Breaking out of scan loop");
       // we know that the region has not been exhausted yet so just break without calling
       // closeScannerIfExhausted
       break;
@@ -546,7 +550,10 @@ public abstract class ClientScanner extends AbstractClientScanner {
 
   @Override
   public void close() {
-    if (!scanMetricsPublished) writeScanMetrics();
+    try (Scope ignored = span.makeCurrent()) {
+      if (!scanMetricsPublished) {
+        writeScanMetrics();
+      }
       if (callable != null) {
         callable.setClose();
         try {
@@ -559,14 +566,21 @@ public abstract class ClientScanner extends AbstractClientScanner {
         } catch (IOException e) {
           /* An exception other than UnknownScanner is unexpected. */
           LOG.warn("scanner failed to close.", e);
+          span.recordException(e);
+          span.setStatus(StatusCode.ERROR);
         }
         callable = null;
       }
       closed = true;
+      span.setStatus(StatusCode.OK);
+    } finally {
+      span.end();
+    }
   }
 
   @Override
   public boolean renewLease() {
+    try (Scope ignored = span.makeCurrent()) {
       if (callable == null) {
         return false;
       }
@@ -577,11 +591,13 @@ public abstract class ClientScanner extends AbstractClientScanner {
       return true;
     } catch (Exception e) {
       LOG.debug("scanner failed to renew lease", e);
+      span.recordException(e);
       return false;
     } finally {
       callable.setRenew(false);
     }
+    }
   }
 
   protected void initCache() {
     initSyncCache();
@@ -589,6 +605,8 @@ public abstract class ClientScanner extends AbstractClientScanner {
 
   @Override
   public Result next() throws IOException {
+    try (Scope ignored = span.makeCurrent()) {
       return nextWithSyncCache();
+    }
   }
 }
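`ClientScanner` above takes the opposite approach to the async path: it captures `Span.current()`
once at construction and re-activates that captured span in `next()`, `renewLease()`, and
`close()`, so work done on later calls is still attributed to the operation that created the
scanner. A standalone sketch of that capture-and-reactivate pattern, assuming only the
OpenTelemetry API (the class itself is hypothetical, not HBase code):

import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.StatusCode;
import io.opentelemetry.context.Scope;

// Hypothetical resource: captures whatever span is current when it is constructed,
// then re-activates that span in methods invoked later from other call sites.
class SpanCapturingResource implements AutoCloseable {
  private final Span span = Span.current(); // like ClientScanner's `this.span = Span.current()`

  void doWork() {
    // Re-activate the captured span so child spans and events nest under it.
    try (Scope ignored = span.makeCurrent()) {
      Span.current().addEvent("work attributed to the captured span");
    }
  }

  @Override
  public void close() {
    // The capturer owns the span's lifecycle, mirroring ClientScanner#close().
    span.setStatus(StatusCode.OK);
    span.end();
  }
}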
org/apache/hadoop/hbase/client/ConnectionImplementation.java

@@ -1,5 +1,4 @@
-/**
- *
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -30,6 +29,8 @@ import static org.apache.hadoop.hbase.util.ConcurrentMapUtils.computeIfAbsent;
 import static org.apache.hadoop.hbase.util.ConcurrentMapUtils.computeIfAbsentEx;
 
 import edu.umd.cs.findbugs.annotations.Nullable;
+import io.opentelemetry.api.trace.Span;
+import io.opentelemetry.context.Scope;
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -69,6 +70,7 @@ import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
 import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory;
+import org.apache.hadoop.hbase.client.trace.TableOperationSpanBuilder;
 import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
 import org.apache.hadoop.hbase.exceptions.ConnectionClosedException;
 import org.apache.hadoop.hbase.exceptions.RegionMovedException;
@@ -163,8 +165,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
 
 /**
- * Main implementation of {@link Connection} and {@link ClusterConnection} interfaces.
- * Encapsulates connection to zookeeper and regionservers.
+ * Main implementation of {@link Connection} and {@link ClusterConnection} interfaces. Encapsulates
+ * connection to zookeeper and regionservers.
  */
 @edu.umd.cs.findbugs.annotations.SuppressWarnings(
   value = "AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION",
@@ -186,8 +188,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
   final int rpcTimeout;
 
   /**
-   * Global nonceGenerator shared per client.Currently there's no reason to limit its scope.
-   * Once it's set under nonceGeneratorCreateLock, it is never unset or changed.
+   * Global nonceGenerator shared per client.Currently there's no reason to limit its scope. Once
+   * it's set under nonceGeneratorCreateLock, it is never unset or changed.
    */
   private static volatile NonceGenerator nonceGenerator = null;
   /** The nonce generator lock. Only taken when creating Connection, which gets a private copy. */
@@ -243,8 +245,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
   private final ClientBackoffPolicy backoffPolicy;
 
   /**
-   * Allow setting an alternate BufferedMutator implementation via
-   * config. If null, use default.
+   * Allow setting an alternate BufferedMutator implementation via config. If null, use default.
    */
   private final String alternateBufferedMutatorClassName;
 
@@ -274,8 +275,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
     this.batchPool = (ThreadPoolExecutor) pool;
     this.connectionConfig = new ConnectionConfiguration(conf);
     this.closed = false;
-    this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
-      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
+    this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
     long configuredPauseForCQTBE = conf.getLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, pause);
     if (configuredPauseForCQTBE < pause) {
       LOG.warn("The " + HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE + " setting: "
@@ -290,9 +290,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
 
     // how many times to try, one more than max *retry* time
     this.numTries = retries2Attempts(connectionConfig.getRetriesNumber());
-    this.rpcTimeout = conf.getInt(
-      HConstants.HBASE_RPC_TIMEOUT_KEY,
-      HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
+    this.rpcTimeout =
+      conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
     if (conf.getBoolean(NonceGenerator.CLIENT_NONCES_ENABLED_KEY, true)) {
       synchronized (nonceGeneratorCreateLock) {
         if (nonceGenerator == null) {
@@ -317,16 +316,14 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
     }
     this.metaCache = new MetaCache(this.metrics);
 
-    boolean shouldListen = conf.getBoolean(HConstants.STATUS_PUBLISHED,
-      HConstants.STATUS_PUBLISHED_DEFAULT);
-    Class<? extends ClusterStatusListener.Listener> listenerClass =
-      conf.getClass(ClusterStatusListener.STATUS_LISTENER_CLASS,
-        ClusterStatusListener.DEFAULT_STATUS_LISTENER_CLASS,
-        ClusterStatusListener.Listener.class);
+    boolean shouldListen =
+      conf.getBoolean(HConstants.STATUS_PUBLISHED, HConstants.STATUS_PUBLISHED_DEFAULT);
+    Class<? extends ClusterStatusListener.Listener> listenerClass = conf.getClass(
+      ClusterStatusListener.STATUS_LISTENER_CLASS,
+      ClusterStatusListener.DEFAULT_STATUS_LISTENER_CLASS, ClusterStatusListener.Listener.class);
 
     // Is there an alternate BufferedMutator to use?
-    this.alternateBufferedMutatorClassName =
-      this.conf.get(BufferedMutator.CLASSNAME_KEY);
+    this.alternateBufferedMutatorClassName = this.conf.get(BufferedMutator.CLASSNAME_KEY);
 
     try {
       if (registry == null) {
@@ -341,11 +338,11 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
       // Do we publish the status?
       if (shouldListen) {
         if (listenerClass == null) {
-          LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +
-            ClusterStatusListener.STATUS_LISTENER_CLASS + " is not set - not listening status");
+          LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but "
+            + ClusterStatusListener.STATUS_LISTENER_CLASS + " is not set - not listening status");
         } else {
-          clusterStatusListener = new ClusterStatusListener(
-            new ClusterStatusListener.DeadServerHandler() {
+          clusterStatusListener =
+            new ClusterStatusListener(new ClusterStatusListener.DeadServerHandler() {
               @Override
               public void newDead(ServerName sn) {
                 clearCaches(sn);
@@ -362,21 +359,21 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
     }
 
     // Get the region locator's meta replica mode.
-    this.metaReplicaMode = CatalogReplicaMode.fromString(conf.get(LOCATOR_META_REPLICAS_MODE,
-      CatalogReplicaMode.NONE.toString()));
+    this.metaReplicaMode = CatalogReplicaMode
+      .fromString(conf.get(LOCATOR_META_REPLICAS_MODE, CatalogReplicaMode.NONE.toString()));
 
     switch (this.metaReplicaMode) {
       case LOAD_BALANCE:
-        String replicaSelectorClass = conf.get(
-          RegionLocator.LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR,
+        String replicaSelectorClass =
+          conf.get(RegionLocator.LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR,
           CatalogReplicaLoadBalanceSimpleSelector.class.getName());
 
-        this.metaReplicaSelector = CatalogReplicaLoadBalanceSelectorFactory.createSelector(
-          replicaSelectorClass, META_TABLE_NAME, getChoreService(), () -> {
+        this.metaReplicaSelector = CatalogReplicaLoadBalanceSelectorFactory
+          .createSelector(replicaSelectorClass, META_TABLE_NAME, getChoreService(), () -> {
             int numOfReplicas = 1;
             try {
-              RegionLocations metaLocations = this.registry.getMetaRegionLocations().get(
-                connectionConfig.getReadRpcTimeout(), TimeUnit.MILLISECONDS);
+              RegionLocations metaLocations = this.registry.getMetaRegionLocations()
+                .get(connectionConfig.getReadRpcTimeout(), TimeUnit.MILLISECONDS);
               numOfReplicas = metaLocations.size();
             } catch (Exception e) {
               LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e);
@@ -387,8 +384,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
       case NONE:
         // If user does not configure LOCATOR_META_REPLICAS_MODE, let's check the legacy config.
 
-        boolean useMetaReplicas = conf.getBoolean(USE_META_REPLICAS,
-          DEFAULT_USE_META_REPLICAS);
+        boolean useMetaReplicas = conf.getBoolean(USE_META_REPLICAS, DEFAULT_USE_META_REPLICAS);
         if (useMetaReplicas) {
           this.metaReplicaMode = CatalogReplicaMode.HEDGED_READ;
         }
@@ -408,12 +404,10 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
    * @param cnm Replaces the nonce generator used, for testing.
    * @return old nonce generator.
    */
-  static NonceGenerator injectNonceGeneratorForTesting(
-      ClusterConnection conn, NonceGenerator cnm) {
+  static NonceGenerator injectNonceGeneratorForTesting(ClusterConnection conn, NonceGenerator cnm) {
     ConnectionImplementation connImpl = (ConnectionImplementation) conn;
     NonceGenerator ng = connImpl.getNonceGenerator();
-    LOG.warn("Nonce generator is being replaced by test code for "
-      + cnm.getClass().getName());
+    LOG.warn("Nonce generator is being replaced by test code for " + cnm.getClass().getName());
     nonceGenerator = cnm;
     return ng;
   }
@@ -552,10 +546,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
     long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60);
     BlockingQueue<Runnable> workQueue = passedWorkQueue;
     if (workQueue == null) {
-      workQueue =
-        new LinkedBlockingQueue<>(maxThreads *
-          conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
-            HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
+      workQueue = new LinkedBlockingQueue<>(maxThreads * conf.getInt(
+        HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
       coreThreads = maxThreads;
     }
     ThreadPoolExecutor tpe =
@@ -575,10 +567,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
         // After that, requests will get queued up in the passed queue, and only after
         // the queue is full, a new thread will be started
         int threads = conf.getInt("hbase.hconnection.meta.lookup.threads.max", 128);
-        this.metaLookupPool = getThreadPool(
-          threads,
-          threads,
-          "-metaLookup-shared-", new LinkedBlockingQueue<>());
+        this.metaLookupPool =
+          getThreadPool(threads, threads, "-metaLookup-shared-", new LinkedBlockingQueue<>());
       }
     }
   }
@@ -668,9 +658,9 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
   }
 
   /**
-   * Like {@link ConnectionClosedException} but thrown from the checkClosed call which looks
-   * at the local this.closed flag. We use this rather than {@link ConnectionClosedException}
-   * because the latter does not inherit from DoNotRetryIOE (it should. TODO).
+   * Like {@link ConnectionClosedException} but thrown from the checkClosed call which looks at the
+   * local this.closed flag. We use this rather than {@link ConnectionClosedException} because the
+   * latter does not inherit from DoNotRetryIOE (it should. TODO).
    */
   private static class LocalConnectionClosedException extends DoNotRetryIOException {
     LocalConnectionClosedException(String message) {
@@ -705,7 +695,6 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
     return reload ? relocateRegion(tableName, row) : locateRegion(tableName, row);
   }
 
-
   @Override
   public boolean isTableEnabled(TableName tableName) throws IOException {
     return getTableState(tableName).inStates(TableState.State.ENABLED);
@@ -740,8 +729,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
           LOG.debug("Table {} has not deployed region {}", tableName,
             pair.getFirst().getEncodedName());
           notDeployed++;
-        } else if (splitKeys != null
-          && !Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
+        } else
+          if (splitKeys != null && !Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
           for (byte[] splitKey : splitKeys) {
             // Just check if the splitkey is available
             if (Bytes.equals(info.getStartKey(), splitKey)) {
@@ -838,8 +827,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
   }
 
   @Override
-  public RegionLocations relocateRegion(final TableName tableName,
-      final byte [] row, int replicaId) throws IOException{
+  public RegionLocations relocateRegion(final TableName tableName, final byte[] row, int replicaId)
+      throws IOException {
     // Since this is an explicit request not to use any caching, finding
     // disabled tables should not be desirable. This will ensure that an exception is thrown when
     // the first time a disabled table is interacted with.
@@ -871,8 +860,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
     }
   }
 
-  private RegionLocations locateMeta(final TableName tableName,
-      boolean useCache, int replicaId) throws IOException {
+  private RegionLocations locateMeta(final TableName tableName, boolean useCache, int replicaId)
+      throws IOException {
     // HBASE-10785: We cache the location of the META itself, so that we are not overloading
     // zookeeper with one request for every region lookup. We cache the META with empty row
     // key in MetaCache.
@@ -931,8 +920,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
 
     switch (this.metaReplicaMode) {
      case LOAD_BALANCE:
-        int metaReplicaId = this.metaReplicaSelector.select(tableName, row,
-          RegionLocateType.CURRENT);
+        int metaReplicaId =
+          this.metaReplicaSelector.select(tableName, row, RegionLocateType.CURRENT);
         if (metaReplicaId != RegionInfo.DEFAULT_REPLICA_ID) {
          // If the selector gives a non-primary meta replica region, then go with it.
          // Otherwise, just go to primary in non-hedgedRead mode.
@@ -981,9 +970,12 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
         RegionInfo.DEFAULT_REPLICA_ID);
     }
     s.resetMvccReadPoint();
-    try (ReversedClientScanner rcs =
-      new ReversedClientScanner(conf, s, TableName.META_TABLE_NAME, this, rpcCallerFactory,
-        rpcControllerFactory, getMetaLookupPool(), metaReplicaCallTimeoutScanInMicroSecond)) {
+    final Span span = new TableOperationSpanBuilder(this)
+      .setTableName(TableName.META_TABLE_NAME).setOperation(s).build();
+    try (Scope ignored = span.makeCurrent();
+      ReversedClientScanner rcs = new ReversedClientScanner(conf, s,
+        TableName.META_TABLE_NAME, this, rpcCallerFactory, rpcControllerFactory,
+        getMetaLookupPool(), metaReplicaCallTimeoutScanInMicroSecond)) {
       boolean tableNotFound = true;
       for (;;) {
         Result regionInfoRow = rcs.next();
@@ -1003,8 +995,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
         }
         RegionInfo regionInfo = locations.getRegionLocation(replicaId).getRegion();
         if (regionInfo == null) {
-          throw new IOException("RegionInfo null or empty in " + TableName.META_TABLE_NAME +
-            ", row=" + regionInfoRow);
+          throw new IOException("RegionInfo null or empty in " + TableName.META_TABLE_NAME
+            + ", row=" + regionInfoRow);
         }
         // See HBASE-20182. It is possible that we locate to a split parent even after the
         // children are online, so here we need to skip this region and go to the next one.
@@ -1012,8 +1004,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
           continue;
         }
         if (regionInfo.isOffline()) {
-          throw new RegionOfflineException("Region offline; disable table call? " +
-            regionInfo.getRegionNameAsString());
+          throw new RegionOfflineException(
+            "Region offline; disable table call? " + regionInfo.getRegionNameAsString());
         }
         // It is possible that the split children have not been online yet and we have skipped
         // the parent in the above condition, so we may have already reached a region which does
@@ -1024,14 +1016,14 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
         }
         ServerName serverName = locations.getRegionLocation(replicaId).getServerName();
         if (serverName == null) {
-          throw new NoServerForRegionException("No server address listed in " +
-            TableName.META_TABLE_NAME + " for region " + regionInfo.getRegionNameAsString() +
-            " containing row " + Bytes.toStringBinary(row));
+          throw new NoServerForRegionException("No server address listed in "
+            + TableName.META_TABLE_NAME + " for region " + regionInfo.getRegionNameAsString()
+            + " containing row " + Bytes.toStringBinary(row));
         }
         if (isDeadServer(serverName)) {
           throw new RegionServerStoppedException(
-            "hbase:meta says the region " + regionInfo.getRegionNameAsString() +
-              " is managed by the server " + serverName + ", but it is dead.");
+            "hbase:meta says the region " + regionInfo.getRegionNameAsString()
+              + " is managed by the server " + serverName + ", but it is dead.");
         }
         // Instantiate the location
         cacheLocation(tableName, locations);
@@ -1057,8 +1049,10 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
           pauseBase = this.pauseForCQTBE;
         }
         if (tries < maxAttempts - 1) {
-          LOG.debug("locateRegionInMeta parentTable='{}', attempt={} of {} failed; retrying " +
-            "after sleep of {}", TableName.META_TABLE_NAME, tries, maxAttempts, maxAttempts, e);
+          LOG.debug(
+            "locateRegionInMeta parentTable='{}', attempt={} of {} failed; retrying "
+              + "after sleep of {}",
+            TableName.META_TABLE_NAME, tries, maxAttempts, maxAttempts, e);
         } else {
           throw e;
         }
@@ -1071,8 +1065,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
       try {
         Thread.sleep(ConnectionUtils.getPauseTime(pauseBase, tries));
       } catch (InterruptedException e) {
-        throw new InterruptedIOException("Giving up trying to location region in " +
-          "meta: thread is interrupted.");
+        throw new InterruptedIOException(
+          "Giving up trying to location region in " + "meta: thread is interrupted.");
       }
     }
   }
@@ -1081,8 +1075,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
     try {
       long waitTime = connectionConfig.getMetaOperationTimeout();
       if (!userRegionLock.tryLock(waitTime, TimeUnit.MILLISECONDS)) {
-        throw new LockTimeoutException("Failed to get user region lock in"
-          + waitTime + " ms. " + " for accessing meta region server.");
+        throw new LockTimeoutException("Failed to get user region lock in" + waitTime + " ms. "
+          + " for accessing meta region server.");
       }
     } catch (InterruptedException ie) {
       LOG.error("Interrupted while waiting for a lock", ie);
@@ -1101,12 +1095,11 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
   }
 
   /**
-   * Search the cache for a location that fits our table and row key.
-   * Return null if no suitable region is located.
+   * Search the cache for a location that fits our table and row key. Return null if no suitable
+   * region is located.
    * @return Null or region location found in cache.
   */
-  RegionLocations getCachedLocation(final TableName tableName,
-      final byte [] row) {
+  RegionLocations getCachedLocation(final TableName tableName, final byte[] row) {
    return metaCache.getCachedLocation(tableName, row);
   }
 
@@ -1189,7 +1182,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
   */
   static class ServerErrorTracker {
    // We need a concurrent map here, as we could have multiple threads updating it in parallel.
-    private final ConcurrentMap<ServerName, ServerErrors> errorsByServer = new ConcurrentHashMap<>();
+    private final ConcurrentMap<ServerName, ServerErrors> errorsByServer =
+      new ConcurrentHashMap<>();
    private final long canRetryUntil;
    private final int maxTries;// max number to try
    private final long startTrackingTime;
@@ -1211,13 +1205,12 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
     */
    boolean canTryMore(int numAttempt) {
      // If there is a single try we must not take into account the time.
-      return numAttempt < maxTries || (maxTries > 1 &&
-        EnvironmentEdgeManager.currentTime() < this.canRetryUntil);
+      return numAttempt < maxTries
+        || (maxTries > 1 && EnvironmentEdgeManager.currentTime() < this.canRetryUntil);
    }
 
    /**
     * Calculates the back-off time for a retrying request to a particular server.
-     *
     * @param server The server in question.
     * @param basePause The default hci pause.
     * @return The time to wait before sending next request.
@@ -1292,8 +1285,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
      throw new MasterNotRunningException(sn + " is dead.");
    }
    // Use the security info interface name as our stub key
-    String key =
-      getStubKey(MasterProtos.MasterService.getDescriptor().getName(), sn);
+    String key = getStubKey(MasterProtos.MasterService.getDescriptor().getName(), sn);
    MasterProtos.MasterService.BlockingInterface stub =
      (MasterProtos.MasterService.BlockingInterface) computeIfAbsentEx(stubs, key, () -> {
        BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn, user, rpcTimeout);
@@ -1355,8 +1347,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
    if (isDeadServer(serverName)) {
      throw new RegionServerStoppedException(serverName + " is dead.");
    }
-    String key = getStubKey(ClientProtos.ClientService.BlockingInterface.class.getName(),
-      serverName);
+    String key =
+      getStubKey(ClientProtos.ClientService.BlockingInterface.class.getName(), serverName);
    return (ClientProtos.ClientService.BlockingInterface) computeIfAbsentEx(stubs, key, () -> {
      BlockingRpcChannel channel =
        this.rpcClient.createBlockingRpcChannel(serverName, user, rpcTimeout);
@@ -1389,44 +1381,38 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
    MasterServiceState mss = masterServiceState;
 
    @Override
-    public MasterProtos.AbortProcedureResponse abortProcedure(
-        RpcController controller,
+    public MasterProtos.AbortProcedureResponse abortProcedure(RpcController controller,
        MasterProtos.AbortProcedureRequest request) throws ServiceException {
      return stub.abortProcedure(controller, request);
    }
 
    @Override
-    public MasterProtos.GetProceduresResponse getProcedures(
-        RpcController controller,
+    public MasterProtos.GetProceduresResponse getProcedures(RpcController controller,
        MasterProtos.GetProceduresRequest request) throws ServiceException {
      return stub.getProcedures(controller, request);
    }
 
    @Override
-    public MasterProtos.GetLocksResponse getLocks(
-        RpcController controller,
+    public MasterProtos.GetLocksResponse getLocks(RpcController controller,
        MasterProtos.GetLocksRequest request) throws ServiceException {
      return stub.getLocks(controller, request);
    }
 
    @Override
-    public MasterProtos.AddColumnResponse addColumn(
-        RpcController controller,
+    public MasterProtos.AddColumnResponse addColumn(RpcController controller,
        MasterProtos.AddColumnRequest request) throws ServiceException {
      return stub.addColumn(controller, request);
    }
 
    @Override
    public MasterProtos.DeleteColumnResponse deleteColumn(RpcController controller,
-        MasterProtos.DeleteColumnRequest request)
-    throws ServiceException {
+        MasterProtos.DeleteColumnRequest request) throws ServiceException {
      return stub.deleteColumn(controller, request);
    }
 
    @Override
    public MasterProtos.ModifyColumnResponse modifyColumn(RpcController controller,
-        MasterProtos.ModifyColumnRequest request)
-    throws ServiceException {
+        MasterProtos.ModifyColumnRequest request) throws ServiceException {
      return stub.modifyColumn(controller, request);
    }
 
@@ -1437,9 +1423,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
    }
 
    @Override
-    public MasterProtos.MergeTableRegionsResponse mergeTableRegions(
-        RpcController controller, MasterProtos.MergeTableRegionsRequest request)
-        throws ServiceException {
+    public MasterProtos.MergeTableRegionsResponse mergeTableRegions(RpcController controller,
+        MasterProtos.MergeTableRegionsRequest request) throws ServiceException {
      return stub.mergeTableRegions(controller, request);
    }
 
@@ -1517,8 +1502,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
 
    @Override
    public MasterProtos.IsInMaintenanceModeResponse isMasterInMaintenanceMode(
-        final RpcController controller,
-        final MasterProtos.IsInMaintenanceModeRequest request) throws ServiceException {
+        final RpcController controller, final MasterProtos.IsInMaintenanceModeRequest request)
+        throws ServiceException {
      return stub.isMasterInMaintenanceMode(controller, request);
    }
 
@@ -1529,22 +1514,20 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
    }
 
    @Override
-    public MasterProtos.SetBalancerRunningResponse setBalancerRunning(
-        RpcController controller, MasterProtos.SetBalancerRunningRequest request)
-        throws ServiceException {
+    public MasterProtos.SetBalancerRunningResponse setBalancerRunning(RpcController controller,
+        MasterProtos.SetBalancerRunningRequest request) throws ServiceException {
      return stub.setBalancerRunning(controller, request);
    }
 
    @Override
-    public NormalizeResponse normalize(RpcController controller,
-        NormalizeRequest request) throws ServiceException {
+    public NormalizeResponse normalize(RpcController controller, NormalizeRequest request)
+        throws ServiceException {
      return stub.normalize(controller, request);
    }
 
    @Override
-    public SetNormalizerRunningResponse setNormalizerRunning(
-        RpcController controller, SetNormalizerRunningRequest request)
-        throws ServiceException {
+    public SetNormalizerRunningResponse setNormalizerRunning(RpcController controller,
+        SetNormalizerRunningRequest request) throws ServiceException {
      return stub.setNormalizerRunning(controller, request);
    }
 
@@ -1570,8 +1553,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
 
    @Override
    public MasterProtos.RunCleanerChoreResponse runCleanerChore(RpcController controller,
-        MasterProtos.RunCleanerChoreRequest request)
-        throws ServiceException {
+        MasterProtos.RunCleanerChoreRequest request) throws ServiceException {
      return stub.runCleanerChore(controller, request);
    }
 
@@ -1590,9 +1572,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
    }
 
    @Override
-    public ClientProtos.CoprocessorServiceResponse execMasterService(
-        RpcController controller, ClientProtos.CoprocessorServiceRequest request)
-        throws ServiceException {
+    public ClientProtos.CoprocessorServiceResponse execMasterService(RpcController controller,
+        ClientProtos.CoprocessorServiceRequest request) throws ServiceException {
      return stub.execMasterService(controller, request);
    }
 
@@ -1622,16 +1603,14 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
    }
 
    @Override
-    public MasterProtos.RestoreSnapshotResponse restoreSnapshot(
-        RpcController controller, MasterProtos.RestoreSnapshotRequest request)
-        throws ServiceException {
+    public MasterProtos.RestoreSnapshotResponse restoreSnapshot(RpcController controller,
+        MasterProtos.RestoreSnapshotRequest request) throws ServiceException {
      return stub.restoreSnapshot(controller, request);
    }
 
    @Override
-    public MasterProtos.SetSnapshotCleanupResponse switchSnapshotCleanup(
-        RpcController controller, MasterProtos.SetSnapshotCleanupRequest request)
-        throws ServiceException {
+    public MasterProtos.SetSnapshotCleanupResponse switchSnapshotCleanup(RpcController controller,
+        MasterProtos.SetSnapshotCleanupRequest request) throws ServiceException {
      return stub.switchSnapshotCleanup(controller, request);
    }
 
@@ -1643,16 +1622,14 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
    }
 
    @Override
-    public MasterProtos.ExecProcedureResponse execProcedure(
-        RpcController controller, MasterProtos.ExecProcedureRequest request)
-        throws ServiceException {
+    public MasterProtos.ExecProcedureResponse execProcedure(RpcController controller,
+        MasterProtos.ExecProcedureRequest request) throws ServiceException {
      return stub.execProcedure(controller, request);
    }
 
    @Override
-    public MasterProtos.ExecProcedureResponse execProcedureWithRet(
-        RpcController controller, MasterProtos.ExecProcedureRequest request)
-        throws ServiceException {
+    public MasterProtos.ExecProcedureResponse execProcedureWithRet(RpcController controller,
+        MasterProtos.ExecProcedureRequest request) throws ServiceException {
      return stub.execProcedureWithRet(controller, request);
    }
 
@@ -1669,51 +1646,46 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
    }
 
    @Override
-    public MasterProtos.IsMasterRunningResponse isMasterRunning(
-        RpcController controller, MasterProtos.IsMasterRunningRequest request)
-        throws ServiceException {
||||
public MasterProtos.IsMasterRunningResponse isMasterRunning(RpcController controller,
|
||||
MasterProtos.IsMasterRunningRequest request) throws ServiceException {
|
||||
return stub.isMasterRunning(controller, request);
|
||||
}
|
||||
|
||||
@Override
|
||||
public MasterProtos.ModifyNamespaceResponse modifyNamespace(RpcController controller,
|
||||
MasterProtos.ModifyNamespaceRequest request)
|
||||
throws ServiceException {
|
||||
MasterProtos.ModifyNamespaceRequest request) throws ServiceException {
|
||||
return stub.modifyNamespace(controller, request);
|
||||
}
|
||||
|
||||
@Override
|
||||
public MasterProtos.CreateNamespaceResponse createNamespace(
|
||||
RpcController controller,
|
||||
public MasterProtos.CreateNamespaceResponse createNamespace(RpcController controller,
|
||||
MasterProtos.CreateNamespaceRequest request) throws ServiceException {
|
||||
return stub.createNamespace(controller, request);
|
||||
}
|
||||
|
||||
@Override
|
||||
public MasterProtos.DeleteNamespaceResponse deleteNamespace(
|
||||
RpcController controller,
|
||||
public MasterProtos.DeleteNamespaceResponse deleteNamespace(RpcController controller,
|
||||
MasterProtos.DeleteNamespaceRequest request) throws ServiceException {
|
||||
return stub.deleteNamespace(controller, request);
|
||||
}
|
||||
|
||||
@Override
|
||||
public MasterProtos.ListNamespacesResponse listNamespaces(
|
||||
RpcController controller,
|
||||
public MasterProtos.ListNamespacesResponse listNamespaces(RpcController controller,
|
||||
MasterProtos.ListNamespacesRequest request) throws ServiceException {
|
||||
return stub.listNamespaces(controller, request);
|
||||
}
|
||||
|
||||
@Override
|
||||
public MasterProtos.GetNamespaceDescriptorResponse getNamespaceDescriptor(
|
||||
RpcController controller,
|
||||
MasterProtos.GetNamespaceDescriptorRequest request) throws ServiceException {
|
||||
RpcController controller, MasterProtos.GetNamespaceDescriptorRequest request)
|
||||
throws ServiceException {
|
||||
return stub.getNamespaceDescriptor(controller, request);
|
||||
}
|
||||
|
||||
@Override
|
||||
public MasterProtos.ListNamespaceDescriptorsResponse listNamespaceDescriptors(
|
||||
RpcController controller,
|
||||
MasterProtos.ListNamespaceDescriptorsRequest request) throws ServiceException {
|
||||
RpcController controller, MasterProtos.ListNamespaceDescriptorsRequest request)
|
||||
throws ServiceException {
|
||||
return stub.listNamespaceDescriptors(controller, request);
|
||||
}
|
||||
|
||||
|
@ -1732,9 +1704,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
|
|||
}
|
||||
|
||||
@Override
|
||||
public MasterProtos.GetTableStateResponse getTableState(
|
||||
RpcController controller, MasterProtos.GetTableStateRequest request)
|
||||
throws ServiceException {
|
||||
public MasterProtos.GetTableStateResponse getTableState(RpcController controller,
|
||||
MasterProtos.GetTableStateRequest request) throws ServiceException {
|
||||
return stub.getTableState(controller, request);
|
||||
}
|
||||
|
||||
|
@ -1751,30 +1722,26 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
|
|||
}
|
||||
|
||||
@Override
|
||||
public MasterProtos.GetTableDescriptorsResponse getTableDescriptors(
|
||||
RpcController controller, MasterProtos.GetTableDescriptorsRequest request)
|
||||
throws ServiceException {
|
||||
public MasterProtos.GetTableDescriptorsResponse getTableDescriptors(RpcController controller,
|
||||
MasterProtos.GetTableDescriptorsRequest request) throws ServiceException {
|
||||
return stub.getTableDescriptors(controller, request);
|
||||
}
|
||||
|
||||
@Override
|
||||
public MasterProtos.GetTableNamesResponse getTableNames(
|
||||
RpcController controller, MasterProtos.GetTableNamesRequest request)
|
||||
throws ServiceException {
|
||||
public MasterProtos.GetTableNamesResponse getTableNames(RpcController controller,
|
||||
MasterProtos.GetTableNamesRequest request) throws ServiceException {
|
||||
return stub.getTableNames(controller, request);
|
||||
}
|
||||
|
||||
@Override
|
||||
public MasterProtos.GetClusterStatusResponse getClusterStatus(
|
||||
RpcController controller, MasterProtos.GetClusterStatusRequest request)
|
||||
throws ServiceException {
|
||||
public MasterProtos.GetClusterStatusResponse getClusterStatus(RpcController controller,
|
||||
MasterProtos.GetClusterStatusRequest request) throws ServiceException {
|
||||
return stub.getClusterStatus(controller, request);
|
||||
}
|
||||
|
||||
@Override
|
||||
public MasterProtos.SetQuotaResponse setQuota(
|
||||
RpcController controller, MasterProtos.SetQuotaRequest request)
|
||||
throws ServiceException {
|
||||
public MasterProtos.SetQuotaResponse setQuota(RpcController controller,
|
||||
MasterProtos.SetQuotaRequest request) throws ServiceException {
|
||||
return stub.setQuota(controller, request);
|
||||
}
|
||||
|
||||
|
@ -1849,8 +1816,9 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
|
|||
}
|
||||
|
||||
@Override
|
||||
public ListDecommissionedRegionServersResponse listDecommissionedRegionServers(RpcController controller,
|
||||
ListDecommissionedRegionServersRequest request) throws ServiceException {
|
||||
public ListDecommissionedRegionServersResponse listDecommissionedRegionServers(
|
||||
RpcController controller, ListDecommissionedRegionServersRequest request)
|
||||
throws ServiceException {
|
||||
return stub.listDecommissionedRegionServers(controller, request);
|
||||
}
|
||||
|
||||
|
@ -1861,9 +1829,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
|
|||
}
|
||||
|
||||
@Override
|
||||
public RecommissionRegionServerResponse recommissionRegionServer(
|
||||
RpcController controller, RecommissionRegionServerRequest request)
|
||||
throws ServiceException {
|
||||
public RecommissionRegionServerResponse recommissionRegionServer(RpcController controller,
|
||||
RecommissionRegionServerRequest request) throws ServiceException {
|
||||
return stub.recommissionRegionServer(controller, request);
|
||||
}
|
||||
|
||||
|
@ -1887,15 +1854,14 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
|
|||
}
|
||||
|
||||
@Override
|
||||
public GetSpaceQuotaRegionSizesResponse getSpaceQuotaRegionSizes(
|
||||
RpcController controller, GetSpaceQuotaRegionSizesRequest request)
|
||||
throws ServiceException {
|
||||
public GetSpaceQuotaRegionSizesResponse getSpaceQuotaRegionSizes(RpcController controller,
|
||||
GetSpaceQuotaRegionSizesRequest request) throws ServiceException {
|
||||
return stub.getSpaceQuotaRegionSizes(controller, request);
|
||||
}
|
||||
|
||||
@Override
|
||||
public GetQuotaStatesResponse getQuotaStates(
|
||||
RpcController controller, GetQuotaStatesRequest request) throws ServiceException {
|
||||
public GetQuotaStatesResponse getQuotaStates(RpcController controller,
|
||||
GetQuotaStatesRequest request) throws ServiceException {
|
||||
return stub.getQuotaStates(controller, request);
|
||||
}
|
||||
|
||||
|
@ -2030,19 +1996,19 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
|
|||
}
|
||||
|
||||
/**
|
||||
* Update the location with the new value (if the exception is a RegionMovedException)
|
||||
* or delete it from the cache. Does nothing if we can be sure from the exception that
|
||||
* the location is still accurate, or if the cache has already been updated.
|
||||
* @param exception an object (to simplify user code) on which we will try to find a nested
|
||||
* or wrapped or both RegionMovedException
|
||||
* Update the location with the new value (if the exception is a RegionMovedException) or delete
|
||||
* it from the cache. Does nothing if we can be sure from the exception that the location is still
|
||||
* accurate, or if the cache has already been updated.
|
||||
* @param exception an object (to simplify user code) on which we will try to find a nested or
|
||||
* wrapped or both RegionMovedException
|
||||
* @param source server that is the source of the location update.
|
||||
*/
|
||||
@Override
|
||||
public void updateCachedLocations(final TableName tableName, byte[] regionName, byte[] rowkey,
|
||||
final Object exception, final ServerName source) {
|
||||
if (rowkey == null || tableName == null) {
|
||||
LOG.warn("Coding error, see method javadoc. row=" + (rowkey == null ? "null" : rowkey) +
|
||||
", tableName=" + (tableName == null ? "null" : tableName));
|
||||
LOG.warn("Coding error, see method javadoc. row=" + (rowkey == null ? "null" : rowkey)
|
||||
+ ", tableName=" + (tableName == null ? "null" : tableName));
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -2083,14 +2049,12 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
|
|||
if (cause instanceof RegionMovedException) {
|
||||
RegionMovedException rme = (RegionMovedException) cause;
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace("Region " + regionInfo.getRegionNameAsString() + " moved to " +
|
||||
rme.getHostname() + ":" + rme.getPort() +
|
||||
" according to " + source.getAddress());
|
||||
LOG.trace("Region " + regionInfo.getRegionNameAsString() + " moved to "
|
||||
+ rme.getHostname() + ":" + rme.getPort() + " according to " + source.getAddress());
|
||||
}
|
||||
// We know that the region is not anymore on this region server, but we know
|
||||
// the new location.
|
||||
updateCachedLocation(
|
||||
regionInfo, source, rme.getServerName(), rme.getLocationSeqNum());
|
||||
updateCachedLocation(regionInfo, source, rme.getServerName(), rme.getLocationSeqNum());
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
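The javadoc above promises to look for a RegionMovedException that may be nested, wrapped, or both. A minimal sketch of that kind of cause-chain walk, assuming a hypothetical helper name (the real client code also handles RemoteException unwrapping):

    import org.apache.hadoop.hbase.exceptions.RegionMovedException;

    final class CauseChains {
      // Hedged sketch: walk the cause chain looking for a RegionMovedException.
      // The helper name findRegionMovedException is hypothetical.
      static RegionMovedException findRegionMovedException(Throwable t) {
        while (t != null) {
          if (t instanceof RegionMovedException) {
            return (RegionMovedException) t;
          }
          t = t.getCause();
        }
        return null; // nothing found: treat the cached location as stale and drop it
      }
    }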
@@ -2127,8 +2091,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
   }
 
   /*
-   * Return the number of cached region for a table. It will only be called
-   * from a unit test.
+   * Return the number of cached region for a table. It will only be called from a unit test.
    */
   int getNumberOfCachedRegionLocations(final TableName tableName) {
     return metaCache.getNumberOfCachedRegionLocations(tableName);
@@ -2187,12 +2150,11 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
   }
 
   /**
-   * Close the connection for good. On the off chance that someone is unable to close
-   * the connection, perhaps because it bailed out prematurely, the method
-   * below will ensure that this instance is cleaned up.
-   * Caveat: The JVM may take an unknown amount of time to call finalize on an
-   * unreachable object, so our hope is that every consumer cleans up after
-   * itself, like any good citizen.
+   * Close the connection for good. On the off chance that someone is unable to close the
+   * connection, perhaps because it bailed out prematurely, the method below will ensure that this
+   * instance is cleaned up. Caveat: The JVM may take an unknown amount of time to call finalize on
+   * an unreachable object, so our hope is that every consumer cleans up after itself, like any good
+   * citizen.
    */
   @Override
   protected void finalize() throws Throwable {
@@ -2217,8 +2179,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable {
 
   @Override
   public RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf) {
-    return RpcRetryingCallerFactory
-        .instantiate(conf, this.interceptor, this.getStatisticsTracker());
+    return RpcRetryingCallerFactory.instantiate(conf, this.interceptor,
+      this.getStatisticsTracker());
   }
 
   @Override
@@ -22,11 +22,11 @@ package org.apache.hadoop.hbase.client;
 // SEE ABOVE NOTE!
 
 import static org.apache.hadoop.hbase.client.ConnectionUtils.checkHasFamilies;
 
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
 import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
 
 import io.opentelemetry.api.trace.Span;
 import io.opentelemetry.api.trace.SpanKind;
 import io.opentelemetry.api.trace.StatusCode;
@@ -75,6 +75,7 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
@@ -84,23 +85,20 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequ
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateResponse;
 
 /**
- * An implementation of {@link Table}. Used to communicate with a single HBase table.
- * Lightweight. Get as needed and just close when done.
- * Instances of this class SHOULD NOT be constructed directly.
- * Obtain an instance via {@link Connection}. See {@link ConnectionFactory}
- * class comment for an example of how.
- *
- * <p>This class is thread safe since 2.0.0 if not invoking any of the setter methods.
- * All setters are moved into {@link TableBuilder} and reserved here only for keeping
- * backward compatibility, and TODO will be removed soon.
- *
- * <p>HTable is no longer a client API. Use {@link Table} instead. It is marked
- * InterfaceAudience.Private indicating that this is an HBase-internal class as defined in
- * <a href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html">Hadoop
- * Interface Classification</a>
- * There are no guarantees for backwards source / binary compatibility and methods or class can
- * change or go away without deprecation.
- *
+ * An implementation of {@link Table}. Used to communicate with a single HBase table. Lightweight.
+ * Get as needed and just close when done. Instances of this class SHOULD NOT be constructed
+ * directly. Obtain an instance via {@link Connection}. See {@link ConnectionFactory} class comment
+ * for an example of how.
+ * <p>
+ * This class is thread safe since 2.0.0 if not invoking any of the setter methods. All setters are
+ * moved into {@link TableBuilder} and reserved here only for keeping backward compatibility, and
+ * TODO will be removed soon.
+ * <p>
+ * HTable is no longer a client API. Use {@link Table} instead. It is marked
+ * InterfaceAudience.Private indicating that this is an HBase-internal class as defined in <a href=
+ * "https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html">Hadoop
+ * Interface Classification</a> There are no guarantees for backwards source / binary compatibility
+ * and methods or class can change or go away without deprecation.
 * @see Table
 * @see Admin
 * @see Connection
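As this class comment says, HTable should not be constructed directly; instances come from a Connection. A short usage sketch of that path ("my_table" is a placeholder):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class TableAccessExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Obtain the Table through a Connection, never by constructing HTable.
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(TableName.valueOf("my_table"))) {
          // use the table; try-with-resources closes it when done
        }
      }
    }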
@@ -154,9 +152,9 @@ public class HTable implements Table {
   }
 
   /**
-   * Creates an object to access a HBase table.
-   * Used by HBase internally. DO NOT USE. See {@link ConnectionFactory} class comment for how to
-   * get a {@link Table} instance (use {@link Table} instead of {@link HTable}).
+   * Creates an object to access a HBase table. Used by HBase internally. DO NOT USE. See
+   * {@link ConnectionFactory} class comment for how to get a {@link Table} instance (use
+   * {@link Table} instead of {@link HTable}).
    * @param connection Connection to be used.
    * @param builder The table builder
    * @param rpcCallerFactory The RPC caller factory
@@ -164,11 +162,9 @@ public class HTable implements Table {
    * @param pool ExecutorService to be used.
    */
   @InterfaceAudience.Private
-  protected HTable(final ConnectionImplementation connection,
-      final TableBuilderBase builder,
+  protected HTable(final ConnectionImplementation connection, final TableBuilderBase builder,
       final RpcRetryingCallerFactory rpcCallerFactory,
-      final RpcControllerFactory rpcControllerFactory,
-      final ExecutorService pool) {
+      final RpcControllerFactory rpcControllerFactory, final ExecutorService pool) {
     this.connection = Preconditions.checkNotNull(connection, "connection is null");
     this.configuration = connection.getConfiguration();
     this.connConfiguration = connection.getConnectionConfiguration();
@@ -222,8 +218,7 @@ public class HTable implements Table {
   }
 
   /**
-   * <em>INTERNAL</em> Used by unit tests and tools to do low-level
-   * manipulations.
+   * <em>INTERNAL</em> Used by unit tests and tools to do low-level manipulations.
    * @return A Connection instance.
    */
   protected Connection getConnection() {
@@ -248,42 +243,37 @@ public class HTable implements Table {
   }
 
   /**
-   * Get the corresponding start keys and regions for an arbitrary range of
-   * keys.
+   * Get the corresponding start keys and regions for an arbitrary range of keys.
    * <p>
    * @param startKey Starting row in range, inclusive
    * @param endKey Ending row in range
    * @param includeEndKey true if endRow is inclusive, false if exclusive
-   * @return A pair of list of start keys and list of HRegionLocations that
-   *         contain the specified range
+   * @return A pair of list of start keys and list of HRegionLocations that contain the specified
+   *         range
    * @throws IOException if a remote or network exception occurs
    */
-  private Pair<List<byte[]>, List<HRegionLocation>> getKeysAndRegionsInRange(
-      final byte[] startKey, final byte[] endKey, final boolean includeEndKey)
-      throws IOException {
+  private Pair<List<byte[]>, List<HRegionLocation>> getKeysAndRegionsInRange(final byte[] startKey,
+    final byte[] endKey, final boolean includeEndKey) throws IOException {
     return getKeysAndRegionsInRange(startKey, endKey, includeEndKey, false);
   }
 
   /**
-   * Get the corresponding start keys and regions for an arbitrary range of
-   * keys.
+   * Get the corresponding start keys and regions for an arbitrary range of keys.
    * <p>
    * @param startKey Starting row in range, inclusive
    * @param endKey Ending row in range
    * @param includeEndKey true if endRow is inclusive, false if exclusive
   * @param reload true to reload information or false to use cached information
-   * @return A pair of list of start keys and list of HRegionLocations that
-   *         contain the specified range
+   * @return A pair of list of start keys and list of HRegionLocations that contain the specified
+   *         range
   * @throws IOException if a remote or network exception occurs
   */
-  private Pair<List<byte[]>, List<HRegionLocation>> getKeysAndRegionsInRange(
-      final byte[] startKey, final byte[] endKey, final boolean includeEndKey,
-      final boolean reload) throws IOException {
+  private Pair<List<byte[]>, List<HRegionLocation>> getKeysAndRegionsInRange(final byte[] startKey,
+    final byte[] endKey, final boolean includeEndKey, final boolean reload) throws IOException {
     final boolean endKeyIsEndOfTable = Bytes.equals(endKey, HConstants.EMPTY_END_ROW);
     if ((Bytes.compareTo(startKey, endKey) > 0) && !endKeyIsEndOfTable) {
-      throw new IllegalArgumentException(
-        "Invalid range: " + Bytes.toStringBinary(startKey) +
-        " > " + Bytes.toStringBinary(endKey));
+      throw new IllegalArgumentException("Invalid range: " + Bytes.toStringBinary(startKey) + " > "
+        + Bytes.toStringBinary(endKey));
     }
     List<byte[]> keysInRange = new ArrayList<>();
     List<HRegionLocation> regionsInRange = new ArrayList<>();
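getKeysAndRegionsInRange is private; outside callers can get comparable region-boundary information through the public RegionLocator API. A sketch, assuming a placeholder table name:

    import java.util.List;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionBoundariesExample {
      // Rough public-API analogue of the private range lookup above.
      static void printRegions(Connection connection) throws Exception {
        try (RegionLocator locator =
            connection.getRegionLocator(TableName.valueOf("my_table"))) {
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          for (HRegionLocation location : locations) {
            System.out.println(location.getRegion().getRegionNameAsString());
          }
        }
      }
    }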
@@ -300,11 +290,14 @@ public class HTable implements Table {
   }
 
   /**
-   * The underlying {@link HTable} must not be closed.
-   * {@link Table#getScanner(Scan)} has other usage details.
+   * The underlying {@link HTable} must not be closed. {@link Table#getScanner(Scan)} has other
+   * usage details.
    */
   @Override
   public ResultScanner getScanner(Scan scan) throws IOException {
+    final Span span = new TableOperationSpanBuilder(connection).setTableName(tableName)
+      .setOperation(scan).build();
+    try (Scope ignored = span.makeCurrent()) {
       if (scan.getCaching() <= 0) {
         scan.setCaching(scannerCaching);
       }
@@ -315,31 +308,28 @@ public class HTable implements Table {
         // it is not supposed to be set by user, clear
         scan.resetMvccReadPoint();
       }
-      Boolean async = scan.isAsyncPrefetch();
-      if (async == null) {
-        async = connConfiguration.isClientScannerAsyncPrefetch();
-      }
+      final boolean async = scan.isAsyncPrefetch() != null ? scan.isAsyncPrefetch()
+        : connConfiguration.isClientScannerAsyncPrefetch();
+      final int timeout = connConfiguration.getReplicaCallTimeoutMicroSecondScan();
 
       if (scan.isReversed()) {
-        return new ReversedClientScanner(getConfiguration(), scan, getName(),
-            this.connection, this.rpcCallerFactory, this.rpcControllerFactory,
-            pool, connConfiguration.getReplicaCallTimeoutMicroSecondScan());
+        return new ReversedClientScanner(getConfiguration(), scan, getName(), connection,
+          rpcCallerFactory, rpcControllerFactory, pool, timeout);
       } else {
         if (async) {
-          return new ClientAsyncPrefetchScanner(getConfiguration(), scan, getName(), this.connection,
-              this.rpcCallerFactory, this.rpcControllerFactory,
-              pool, connConfiguration.getReplicaCallTimeoutMicroSecondScan());
+          return new ClientAsyncPrefetchScanner(getConfiguration(), scan, getName(), connection,
+            rpcCallerFactory, rpcControllerFactory, pool, timeout);
         } else {
-          return new ClientSimpleScanner(getConfiguration(), scan, getName(), this.connection,
-              this.rpcCallerFactory, this.rpcControllerFactory,
-              pool, connConfiguration.getReplicaCallTimeoutMicroSecondScan());
+          return new ClientSimpleScanner(getConfiguration(), scan, getName(), connection,
+            rpcCallerFactory, rpcControllerFactory, pool, timeout);
         }
       }
+    }
   }
 
   /**
-   * The underlying {@link HTable} must not be closed.
-   * {@link Table#getScanner(byte[])} has other usage details.
+   * The underlying {@link HTable} must not be closed. {@link Table#getScanner(byte[])} has other
+   * usage details.
    */
   @Override
   public ResultScanner getScanner(byte[] family) throws IOException {
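The new getScanner body builds the SCAN span and makes it current while the scanner is constructed; note it does not end the span here, presumably leaving that to the scanner's own lifecycle. For reference, the bare OpenTelemetry pattern looks roughly like this (the tracer name is a placeholder, and unlike getScanner this sketch ends the span itself):

    import io.opentelemetry.api.GlobalOpenTelemetry;
    import io.opentelemetry.api.trace.Span;
    import io.opentelemetry.api.trace.Tracer;
    import io.opentelemetry.context.Scope;

    public class SpanScopeExample {
      static void run() {
        Tracer tracer = GlobalOpenTelemetry.getTracer("example"); // placeholder name
        Span span = tracer.spanBuilder("SCAN my_table").startSpan();
        try (Scope ignored = span.makeCurrent()) {
          // work performed here becomes children of the SCAN span
        } finally {
          span.end(); // getScanner instead leaves the span open for the scanner
        }
      }
    }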
@@ -349,12 +339,11 @@ public class HTable implements Table {
   }
 
   /**
-   * The underlying {@link HTable} must not be closed.
-   * {@link Table#getScanner(byte[], byte[])} has other usage details.
+   * The underlying {@link HTable} must not be closed. {@link Table#getScanner(byte[], byte[])} has
+   * other usage details.
    */
   @Override
-  public ResultScanner getScanner(byte [] family, byte [] qualifier)
-  throws IOException {
+  public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException {
     Scan scan = new Scan();
     scan.addColumn(family, qualifier);
     return getScanner(scan);
@@ -362,9 +351,8 @@ public class HTable implements Table {
 
   @Override
   public Result get(final Get get) throws IOException {
-    final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(get);
+    final Supplier<Span> supplier =
+      new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(get);
     return TraceUtil.trace(() -> get(get, get.isCheckExistenceOnly()), supplier);
   }
 
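get(Get) hands TraceUtil.trace a Supplier<Span>, so span creation, status, and ending are centralized in the helper. A simplified sketch of that wrapper shape (the real TraceUtil differs in details such as checked-exception handling):

    import java.util.concurrent.Callable;
    import java.util.function.Supplier;
    import io.opentelemetry.api.trace.Span;
    import io.opentelemetry.api.trace.StatusCode;
    import io.opentelemetry.context.Scope;

    public final class TraceSketch {
      // Create the span lazily, make it current, record the outcome, always end it.
      static <T> T trace(Callable<T> action, Supplier<Span> spanSupplier) throws Exception {
        Span span = spanSupplier.get();
        try (Scope ignored = span.makeCurrent()) {
          T result = action.call();
          span.setStatus(StatusCode.OK);
          return result;
        } catch (Exception e) {
          span.recordException(e);
          span.setStatus(StatusCode.ERROR);
          throw e;
        } finally {
          span.end();
        }
      }
    }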
@@ -380,15 +368,15 @@ public class HTable implements Table {
 
     if (get.getConsistency() == Consistency.STRONG) {
       final Get configuredGet = get;
-      ClientServiceCallable<Result> callable = new ClientServiceCallable<Result>(this.connection, getName(),
-          get.getRow(), this.rpcControllerFactory.newController(), get.getPriority()) {
+      ClientServiceCallable<Result> callable = new ClientServiceCallable<Result>(this.connection,
+        getName(), get.getRow(), this.rpcControllerFactory.newController(), get.getPriority()) {
         @Override
         protected Result rpcCall() throws Exception {
-          ClientProtos.GetRequest request = RequestConverter.buildGetRequest(
-              getLocation().getRegionInfo().getRegionName(), configuredGet);
+          ClientProtos.GetRequest request = RequestConverter
+            .buildGetRequest(getLocation().getRegionInfo().getRegionName(), configuredGet);
           ClientProtos.GetResponse response = doGet(request);
-          return response == null? null:
-            ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner());
+          return response == null ? null
+            : ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner());
         }
       };
       return rpcCallerFactory.<Result> newCaller(readRpcTimeoutMs).callWithRetries(callable,
@@ -396,19 +384,18 @@ public class HTable implements Table {
     }
 
     // Call that takes into account the replica
-    RpcRetryingCallerWithReadReplicas callable = new RpcRetryingCallerWithReadReplicas(
-      rpcControllerFactory, tableName, this.connection, get, pool,
-      connConfiguration.getRetriesNumber(), operationTimeoutMs, readRpcTimeoutMs,
+    RpcRetryingCallerWithReadReplicas callable =
+      new RpcRetryingCallerWithReadReplicas(rpcControllerFactory, tableName, this.connection, get,
+        pool, connConfiguration.getRetriesNumber(), operationTimeoutMs, readRpcTimeoutMs,
         connConfiguration.getPrimaryCallTimeoutMicroSecond());
     return callable.call(operationTimeoutMs);
   }
 
   @Override
   public Result[] get(List<Get> gets) throws IOException {
-    final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(HBaseSemanticAttributes.Operation.BATCH)
-      .setContainerOperations(gets);
+    final Supplier<Span> supplier =
+      new TableOperationSpanBuilder(connection).setTableName(tableName)
+        .setOperation(HBaseSemanticAttributes.Operation.BATCH).setContainerOperations(gets);
     return TraceUtil.trace(() -> {
       if (gets.size() == 1) {
         return new Result[] { get(gets.get(0)) };
@@ -458,19 +445,12 @@ public class HTable implements Table {
 
   public void batch(final List<? extends Row> actions, final Object[] results, int rpcTimeout)
     throws InterruptedException, IOException {
-    AsyncProcessTask task = AsyncProcessTask.newBuilder()
-            .setPool(pool)
-            .setTableName(tableName)
-            .setRowAccess(actions)
-            .setResults(results)
-            .setRpcTimeout(rpcTimeout)
-            .setOperationTimeout(operationTimeoutMs)
-            .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL)
-            .build();
-    final Span span = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(HBaseSemanticAttributes.Operation.BATCH)
-      .setContainerOperations(actions)
+    AsyncProcessTask task =
+      AsyncProcessTask.newBuilder().setPool(pool).setTableName(tableName).setRowAccess(actions)
+        .setResults(results).setRpcTimeout(rpcTimeout).setOperationTimeout(operationTimeoutMs)
+        .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).build();
+    final Span span = new TableOperationSpanBuilder(connection).setTableName(tableName)
+      .setOperation(HBaseSemanticAttributes.Operation.BATCH).setContainerOperations(actions)
       .build();
     try (Scope ignored = span.makeCurrent()) {
       AsyncRequestFuture ars = multiAp.submit(task);
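batch takes a mixed list of Row actions plus a results array of the same size that is filled in order. A typical caller (row and family names are placeholders):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Row;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BatchExample {
      static void runBatch(Table table) throws Exception {
        List<Row> actions = new ArrayList<>();
        actions.add(new Get(Bytes.toBytes("row1")));
        actions.add(new Put(Bytes.toBytes("row2")).addColumn(Bytes.toBytes("f"),
          Bytes.toBytes("q"), Bytes.toBytes("v")));
        Object[] results = new Object[actions.size()]; // filled in place, same order
        table.batch(actions, results);
      }
    }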
@@ -486,9 +466,8 @@ public class HTable implements Table {
   }
 
   @Override
-  public <R> void batchCallback(
-    final List<? extends Row> actions, final Object[] results, final Batch.Callback<R> callback)
-    throws IOException, InterruptedException {
+  public <R> void batchCallback(final List<? extends Row> actions, final Object[] results,
+    final Batch.Callback<R> callback) throws IOException, InterruptedException {
     doBatchWithCallback(actions, results, callback, connection, pool, tableName);
   }
 
@@ -499,19 +478,12 @@ public class HTable implements Table {
     int writeTimeout = connection.getConfiguration().getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY,
       connection.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
         HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
-    AsyncProcessTask<R> task = AsyncProcessTask.newBuilder(callback)
-            .setPool(pool)
-            .setTableName(tableName)
-            .setRowAccess(actions)
-            .setResults(results)
-            .setOperationTimeout(operationTimeout)
-            .setRpcTimeout(writeTimeout)
-            .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL)
-            .build();
-    final Span span = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(HBaseSemanticAttributes.Operation.BATCH)
-      .setContainerOperations(actions)
+    AsyncProcessTask<R> task = AsyncProcessTask.newBuilder(callback).setPool(pool)
+      .setTableName(tableName).setRowAccess(actions).setResults(results)
+      .setOperationTimeout(operationTimeout).setRpcTimeout(writeTimeout)
+      .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).build();
+    final Span span = new TableOperationSpanBuilder(connection).setTableName(tableName)
+      .setOperation(HBaseSemanticAttributes.Operation.BATCH).setContainerOperations(actions)
       .build();
     try (Scope ignored = span.makeCurrent()) {
       AsyncRequestFuture ars = connection.getAsyncProcess().submit(task);
@@ -527,9 +499,8 @@ public class HTable implements Table {
 
   @Override
   public void delete(final Delete delete) throws IOException {
-    final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(delete);
+    final Supplier<Span> supplier =
+      new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(delete);
     TraceUtil.trace(() -> {
       ClientServiceCallable<Void> callable =
         new ClientServiceCallable<Void>(this.connection, getName(), delete.getRow(),
@@ -542,8 +513,8 @@ public class HTable implements Table {
           return null;
         }
       };
-      rpcCallerFactory.<Void>newCaller(this.writeRpcTimeoutMs)
-          .callWithRetries(callable, this.operationTimeoutMs);
+      rpcCallerFactory.<Void> newCaller(this.writeRpcTimeoutMs).callWithRetries(callable,
+        this.operationTimeoutMs);
     }, supplier);
   }
 
@@ -570,14 +541,12 @@ public class HTable implements Table {
 
   @Override
   public void put(final Put put) throws IOException {
-    final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(put);
+    final Supplier<Span> supplier =
+      new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(put);
     TraceUtil.trace(() -> {
       validatePut(put);
-      ClientServiceCallable<Void> callable =
-          new ClientServiceCallable<Void>(this.connection, getName(), put.getRow(),
-              this.rpcControllerFactory.newController(), put.getPriority()) {
+      ClientServiceCallable<Void> callable = new ClientServiceCallable<Void>(this.connection,
+        getName(), put.getRow(), this.rpcControllerFactory.newController(), put.getPriority()) {
        @Override
        protected Void rpcCall() throws Exception {
          MutateRequest request = RequestConverter
@@ -586,8 +555,8 @@ public class HTable implements Table {
          return null;
        }
      };
-      rpcCallerFactory.<Void>newCaller(this.writeRpcTimeoutMs)
-          .callWithRetries(callable, this.operationTimeoutMs);
+      rpcCallerFactory.<Void> newCaller(this.writeRpcTimeoutMs).callWithRetries(callable,
+        this.operationTimeoutMs);
     }, supplier);
   }
 
@@ -606,16 +575,15 @@ public class HTable implements Table {
 
   @Override
   public Result mutateRow(final RowMutations rm) throws IOException {
-    final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(HBaseSemanticAttributes.Operation.BATCH)
-      .setContainerOperations(rm);
+    final Supplier<Span> supplier =
+      new TableOperationSpanBuilder(connection).setTableName(tableName)
+        .setOperation(HBaseSemanticAttributes.Operation.BATCH).setContainerOperations(rm);
     return TraceUtil.trace(() -> {
       long nonceGroup = getNonceGroup();
       long nonce = getNonce();
       CancellableRegionServerCallable<MultiResponse> callable =
-        new CancellableRegionServerCallable<MultiResponse>(this.connection, getName(), rm.getRow(),
-            rpcControllerFactory.newController(), writeRpcTimeoutMs,
+        new CancellableRegionServerCallable<MultiResponse>(this.connection, getName(),
+          rm.getRow(), rpcControllerFactory.newController(), writeRpcTimeoutMs,
           new RetryingTimeTracker().start(), rm.getMaxPriority()) {
         @Override
         protected MultiResponse rpcCall() throws Exception {
@@ -628,22 +596,17 @@ public class HTable implements Table {
            if (ex instanceof IOException) {
              throw (IOException) ex;
            }
-            throw new IOException("Failed to mutate row: " + Bytes.toStringBinary(rm.getRow()), ex);
+            throw new IOException("Failed to mutate row: " + Bytes.toStringBinary(rm.getRow()),
+              ex);
          }
          return ResponseConverter.getResults(request, response, getRpcControllerCellScanner());
        }
      };
      Object[] results = new Object[rm.getMutations().size()];
-      AsyncProcessTask task = AsyncProcessTask.newBuilder()
-              .setPool(pool)
-              .setTableName(tableName)
-              .setRowAccess(rm.getMutations())
-              .setCallable(callable)
-              .setRpcTimeout(writeRpcTimeoutMs)
+      AsyncProcessTask task = AsyncProcessTask.newBuilder().setPool(pool).setTableName(tableName)
+        .setRowAccess(rm.getMutations()).setCallable(callable).setRpcTimeout(writeRpcTimeoutMs)
        .setOperationTimeout(operationTimeoutMs)
-        .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL)
-        .setResults(results)
-        .build();
+        .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).setResults(results).build();
      AsyncRequestFuture ars = multiAp.submit(task);
      ars.waitUntilDone();
      if (ars.hasError()) {
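mutateRow groups several mutations against a single row and applies them atomically. A typical caller (row, family, and qualifier names are placeholders):

    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RowMutations;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MutateRowExample {
      static void run(Table table) throws Exception {
        byte[] row = Bytes.toBytes("row1");
        RowMutations rm = new RowMutations(row);
        rm.add(new Put(row).addColumn(Bytes.toBytes("f"), Bytes.toBytes("q1"),
          Bytes.toBytes("v")));
        rm.add(new Delete(row).addColumns(Bytes.toBytes("f"), Bytes.toBytes("q2")));
        table.mutateRow(rm); // all mutations on this row apply atomically
      }
    }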
@@ -663,9 +626,8 @@ public class HTable implements Table {
 
   @Override
   public Result append(final Append append) throws IOException {
-    final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(append);
+    final Supplier<Span> supplier =
+      new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(append);
     return TraceUtil.trace(() -> {
       checkHasFamilies(append);
       NoncedRegionServerCallable<Result> callable =
@@ -673,9 +635,9 @@ public class HTable implements Table {
           this.rpcControllerFactory.newController(), append.getPriority()) {
           @Override
           protected Result rpcCall() throws Exception {
-            MutateRequest request = RequestConverter.buildMutateRequest(
-              getLocation().getRegionInfo().getRegionName(), append, super.getNonceGroup(),
-              super.getNonce());
+            MutateRequest request =
+              RequestConverter.buildMutateRequest(getLocation().getRegionInfo().getRegionName(),
+                append, super.getNonceGroup(), super.getNonce());
             MutateResponse response = doMutate(request);
             if (!response.hasResult()) {
               return null;
@@ -683,16 +645,15 @@ public class HTable implements Table {
             return ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner());
           }
         };
-      return rpcCallerFactory.<Result> newCaller(this.writeRpcTimeoutMs).
-          callWithRetries(callable, this.operationTimeoutMs);
+      return rpcCallerFactory.<Result> newCaller(this.writeRpcTimeoutMs).callWithRetries(callable,
+        this.operationTimeoutMs);
     }, supplier);
   }
 
   @Override
   public Result increment(final Increment increment) throws IOException {
-    final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(increment);
+    final Supplier<Span> supplier =
+      new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(increment);
     return TraceUtil.trace(() -> {
       checkHasFamilies(increment);
       NoncedRegionServerCallable<Result> callable =
@@ -700,9 +661,9 @@ public class HTable implements Table {
           this.rpcControllerFactory.newController(), increment.getPriority()) {
           @Override
           protected Result rpcCall() throws Exception {
-            MutateRequest request = RequestConverter.buildMutateRequest(
-              getLocation().getRegionInfo().getRegionName(), increment, super.getNonceGroup(),
-              super.getNonce());
+            MutateRequest request =
+              RequestConverter.buildMutateRequest(getLocation().getRegionInfo().getRegionName(),
+                increment, super.getNonceGroup(), super.getNonce());
             MutateResponse response = doMutate(request);
             // Should this check for null like append does?
             return ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner());
@@ -714,19 +675,16 @@ public class HTable implements Table {
   }
 
   @Override
-  public long incrementColumnValue(final byte [] row, final byte [] family,
-      final byte [] qualifier, final long amount)
-  throws IOException {
+  public long incrementColumnValue(final byte[] row, final byte[] family, final byte[] qualifier,
+    final long amount) throws IOException {
     return incrementColumnValue(row, family, qualifier, amount, Durability.SYNC_WAL);
   }
 
   @Override
-  public long incrementColumnValue(final byte [] row, final byte [] family,
-      final byte [] qualifier, final long amount, final Durability durability)
-  throws IOException {
+  public long incrementColumnValue(final byte[] row, final byte[] family, final byte[] qualifier,
+    final long amount, final Durability durability) throws IOException {
     final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(HBaseSemanticAttributes.Operation.INCREMENT);
+      .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.INCREMENT);
     return TraceUtil.trace(() -> {
       NullPointerException npe = null;
       if (row == null) {
@@ -735,8 +693,7 @@ public class HTable implements Table {
         npe = new NullPointerException("family is null");
       }
       if (npe != null) {
-        throw new IOException(
-            "Invalid arguments to incrementColumnValue", npe);
+        throw new IOException("Invalid arguments to incrementColumnValue", npe);
       }
 
       NoncedRegionServerCallable<Long> callable =
@@ -745,16 +702,16 @@ public class HTable implements Table {
          @Override
          protected Long rpcCall() throws Exception {
            MutateRequest request = RequestConverter.buildIncrementRequest(
-              getLocation().getRegionInfo().getRegionName(), row, family,
-              qualifier, amount, durability, super.getNonceGroup(), super.getNonce());
+              getLocation().getRegionInfo().getRegionName(), row, family, qualifier, amount,
+              durability, super.getNonceGroup(), super.getNonce());
            MutateResponse response = doMutate(request);
            Result result =
              ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner());
            return Long.valueOf(Bytes.toLong(result.getValue(family, qualifier)));
          }
        };
-      return rpcCallerFactory.<Long> newCaller(this.writeRpcTimeoutMs).
-          callWithRetries(callable, this.operationTimeoutMs);
+      return rpcCallerFactory.<Long> newCaller(this.writeRpcTimeoutMs).callWithRetries(callable,
+        this.operationTimeoutMs);
     }, supplier);
   }
 
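incrementColumnValue is a convenience wrapper around a single-cell counter update. A typical caller (names are placeholders):

    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CounterExample {
      static long bumpCounter(Table table) throws Exception {
        // Atomically adds 1 to the counter cell and returns the new value.
        return table.incrementColumnValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
          Bytes.toBytes("hits"), 1L);
      }
    }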
@@ -763,9 +720,9 @@ public class HTable implements Table {
   public boolean checkAndPut(final byte[] row, final byte[] family, final byte[] qualifier,
     final byte[] value, final Put put) throws IOException {
     final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
-      .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, HBaseSemanticAttributes.Operation.PUT);
+      .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
+      .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE,
+        HBaseSemanticAttributes.Operation.PUT);
     return TraceUtil.trace(
       () -> doCheckAndMutate(row, family, qualifier, CompareOperator.EQUAL, value, null, null, put)
         .isSuccess(),
@@ -777,13 +734,11 @@ public class HTable implements Table {
   public boolean checkAndPut(final byte[] row, final byte[] family, final byte[] qualifier,
     final CompareOp compareOp, final byte[] value, final Put put) throws IOException {
     final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
-      .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, HBaseSemanticAttributes.Operation.PUT);
-    return TraceUtil.trace(
-      () -> doCheckAndMutate(row, family, qualifier, toCompareOperator(compareOp), value, null,
-        null, put).isSuccess(),
-      supplier);
+      .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
+      .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE,
+        HBaseSemanticAttributes.Operation.PUT);
+    return TraceUtil.trace(() -> doCheckAndMutate(row, family, qualifier,
+      toCompareOperator(compareOp), value, null, null, put).isSuccess(), supplier);
   }
 
   @Override
@@ -791,9 +746,9 @@ public class HTable implements Table {
   public boolean checkAndPut(final byte[] row, final byte[] family, final byte[] qualifier,
     final CompareOperator op, final byte[] value, final Put put) throws IOException {
     final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
-      .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, HBaseSemanticAttributes.Operation.PUT);
+      .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
+      .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE,
+        HBaseSemanticAttributes.Operation.PUT);
     return TraceUtil.trace(
       () -> doCheckAndMutate(row, family, qualifier, op, value, null, null, put).isSuccess(),
       supplier);
@@ -804,13 +759,11 @@ public class HTable implements Table {
   public boolean checkAndDelete(final byte[] row, final byte[] family, final byte[] qualifier,
     final byte[] value, final Delete delete) throws IOException {
     final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
-      .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, HBaseSemanticAttributes.Operation.DELETE);
-    return TraceUtil.trace(
-      () -> doCheckAndMutate(row, family, qualifier, CompareOperator.EQUAL, value, null, null,
-        delete).isSuccess(),
-      supplier);
+      .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
+      .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE,
+        HBaseSemanticAttributes.Operation.DELETE);
+    return TraceUtil.trace(() -> doCheckAndMutate(row, family, qualifier, CompareOperator.EQUAL,
+      value, null, null, delete).isSuccess(), supplier);
   }
 
   @Override
@@ -818,13 +771,11 @@ public class HTable implements Table {
   public boolean checkAndDelete(final byte[] row, final byte[] family, final byte[] qualifier,
     final CompareOp compareOp, final byte[] value, final Delete delete) throws IOException {
     final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
-      .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, HBaseSemanticAttributes.Operation.DELETE);
-    return TraceUtil.trace(
-      () -> doCheckAndMutate(row, family, qualifier, toCompareOperator(compareOp), value, null,
-        null, delete).isSuccess(),
-      supplier);
+      .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
+      .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE,
+        HBaseSemanticAttributes.Operation.DELETE);
+    return TraceUtil.trace(() -> doCheckAndMutate(row, family, qualifier,
+      toCompareOperator(compareOp), value, null, null, delete).isSuccess(), supplier);
   }
 
   @Override
@@ -832,9 +783,9 @@ public class HTable implements Table {
   public boolean checkAndDelete(final byte[] row, final byte[] family, final byte[] qualifier,
     final CompareOperator op, final byte[] value, final Delete delete) throws IOException {
     final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
-      .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, HBaseSemanticAttributes.Operation.DELETE);
+      .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
+      .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE,
+        HBaseSemanticAttributes.Operation.DELETE);
     return TraceUtil.trace(
       () -> doCheckAndMutate(row, family, qualifier, op, value, null, null, delete).isSuccess(),
       supplier);
@@ -859,13 +810,13 @@ public class HTable implements Table {
       long nonce = getNonce();
       CancellableRegionServerCallable<MultiResponse> callable =
         new CancellableRegionServerCallable<MultiResponse>(connection, getName(), rm.getRow(),
-          rpcControllerFactory.newController(), writeRpcTimeoutMs, new RetryingTimeTracker().start(),
-          rm.getMaxPriority()) {
+          rpcControllerFactory.newController(), writeRpcTimeoutMs,
+          new RetryingTimeTracker().start(), rm.getMaxPriority()) {
           @Override
           protected MultiResponse rpcCall() throws Exception {
-            MultiRequest request = RequestConverter
-                .buildMultiRequest(getLocation().getRegionInfo().getRegionName(), row, family,
-                  qualifier, op, value, filter, timeRange, rm, nonceGroup, nonce);
+            MultiRequest request =
+              RequestConverter.buildMultiRequest(getLocation().getRegionInfo().getRegionName(),
+                row, family, qualifier, op, value, filter, timeRange, rm, nonceGroup, nonce);
             ClientProtos.MultiResponse response = doMulti(request);
             ClientProtos.RegionActionResult res = response.getRegionActionResultList().get(0);
             if (res.hasException()) {
@@ -881,21 +832,16 @@ public class HTable implements Table {
       };
 
       /**
-       * Currently, we use one array to store 'processed' flag which is returned by server.
-       * It is excessive to send such a large array, but that is required by the framework right now
-       * */
+       * Currently, we use one array to store 'processed' flag which is returned by server. It is
+       * excessive to send such a large array, but that is required by the framework right now
+       */
       Object[] results = new Object[rm.getMutations().size()];
-      AsyncProcessTask task = AsyncProcessTask.newBuilder()
-              .setPool(pool)
-              .setTableName(tableName)
-              .setRowAccess(rm.getMutations())
-              .setResults(results)
-              .setCallable(callable)
+      AsyncProcessTask task = AsyncProcessTask.newBuilder().setPool(pool).setTableName(tableName)
+        .setRowAccess(rm.getMutations()).setResults(results).setCallable(callable)
        // TODO any better timeout?
        .setRpcTimeout(Math.max(readRpcTimeoutMs, writeRpcTimeoutMs))
        .setOperationTimeout(operationTimeoutMs)
-        .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL)
-        .build();
+        .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).build();
      AsyncRequestFuture ars = multiAp.submit(task);
      ars.waitUntilDone();
      if (ars.hasError()) {
@@ -910,13 +856,10 @@ public class HTable implements Table {
   public boolean checkAndMutate(final byte[] row, final byte[] family, final byte[] qualifier,
     final CompareOp compareOp, final byte[] value, final RowMutations rm) throws IOException {
     final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
+      .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
       .setContainerOperations(rm);
-    return TraceUtil.trace(
-      () -> doCheckAndMutate(row, family, qualifier, toCompareOperator(compareOp), value, null,
-        null, rm).isSuccess(),
-      supplier);
+    return TraceUtil.trace(() -> doCheckAndMutate(row, family, qualifier,
+      toCompareOperator(compareOp), value, null, null, rm).isSuccess(), supplier);
   }
 
   @Override
@@ -924,8 +867,7 @@ public class HTable implements Table {
   public boolean checkAndMutate(final byte[] row, final byte[] family, final byte[] qualifier,
     final CompareOperator op, final byte[] value, final RowMutations rm) throws IOException {
     final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
+      .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
       .setContainerOperations(rm);
     return TraceUtil.trace(
       () -> doCheckAndMutate(row, family, qualifier, op, value, null, null, rm).isSuccess(),
@@ -934,14 +876,13 @@ public class HTable implements Table {
 
   @Override
   public CheckAndMutateResult checkAndMutate(CheckAndMutate checkAndMutate) throws IOException {
-    final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(checkAndMutate)
-      .setContainerOperations(checkAndMutate);
+    final Supplier<Span> supplier =
+      new TableOperationSpanBuilder(connection).setTableName(tableName)
+        .setOperation(checkAndMutate).setContainerOperations(checkAndMutate);
     return TraceUtil.trace(() -> {
       Row action = checkAndMutate.getAction();
-      if (action instanceof Put || action instanceof Delete || action instanceof Increment ||
-        action instanceof Append) {
+      if (action instanceof Put || action instanceof Delete || action instanceof Increment
+        || action instanceof Append) {
        if (action instanceof Put) {
          validatePut((Put) action);
        }
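A CheckAndMutate bundles the condition and the action in one object, which is why the span builder above can derive both the operation and the container operations from it. A usage sketch against the builder API available in recent 2.x clients (names are placeholders):

    import org.apache.hadoop.hbase.client.CheckAndMutate;
    import org.apache.hadoop.hbase.client.CheckAndMutateResult;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CheckAndMutateExample {
      static boolean putIfMatches(Table table) throws Exception {
        byte[] row = Bytes.toBytes("row1");
        CheckAndMutate cam = CheckAndMutate.newBuilder(row)
          .ifEquals(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("expected"))
          .build(new Put(row).addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"),
            Bytes.toBytes("updated")));
        CheckAndMutateResult result = table.checkAndMutate(cam);
        return result.isSuccess(); // false when the condition did not match
      }
    }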
@@ -985,8 +926,7 @@ public class HTable implements Table {
   public List<CheckAndMutateResult> checkAndMutate(List<CheckAndMutate> checkAndMutates)
     throws IOException {
     final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(HBaseSemanticAttributes.Operation.BATCH)
+      .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.BATCH)
       .setContainerOperations(checkAndMutates);
     return TraceUtil.trace(() -> {
       if (checkAndMutates.isEmpty()) {
@@ -1043,9 +983,8 @@ public class HTable implements Table {
 
   @Override
   public boolean exists(final Get get) throws IOException {
-    final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(get);
+    final Supplier<Span> supplier =
+      new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(get);
     return TraceUtil.trace(() -> {
       Result r = get(get, true);
       assert r.getExists() != null;
@@ -1055,10 +994,9 @@ public class HTable implements Table {
 
   @Override
   public boolean[] exists(List<Get> gets) throws IOException {
-    final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
-      .setTableName(tableName)
-      .setOperation(HBaseSemanticAttributes.Operation.BATCH)
-      .setContainerOperations(gets);
+    final Supplier<Span> supplier =
+      new TableOperationSpanBuilder(connection).setTableName(tableName)
+        .setOperation(HBaseSemanticAttributes.Operation.BATCH).setContainerOperations(gets);
     return TraceUtil.trace(() -> {
       if (gets.isEmpty()) {
         return new boolean[] {};
@@ -1094,28 +1032,23 @@ public class HTable implements Table {
   }
 
   /**
-   * Process a mixed batch of Get, Put and Delete actions. All actions for a
-   * RegionServer are forwarded in one RPC call. Queries are executed in parallel.
-   *
+   * Process a mixed batch of Get, Put and Delete actions. All actions for a RegionServer are
+   * forwarded in one RPC call. Queries are executed in parallel.
    * @param list The collection of actions.
-   * @param results An empty array, same size as list. If an exception is thrown,
-   * you can test here for partial results, and to determine which actions
-   * processed successfully.
-   * @throws IOException if there are problems talking to META. Per-item
-   * exceptions are stored in the results array.
+   * @param results An empty array, same size as list. If an exception is thrown, you can test here
+   *          for partial results, and to determine which actions processed successfully.
+   * @throws IOException if there are problems talking to META. Per-item exceptions are stored in
+   *           the results array.
    */
-  public <R> void processBatchCallback(
-    final List<? extends Row> list, final Object[] results, final Batch.Callback<R> callback)
-    throws IOException, InterruptedException {
+  public <R> void processBatchCallback(final List<? extends Row> list, final Object[] results,
+    final Batch.Callback<R> callback) throws IOException, InterruptedException {
     this.batchCallback(list, results, callback);
   }
 
   @Override
   public void close() throws IOException {
-    final Supplier<Span> supplier = new TableSpanBuilder(connection)
-      .setName("HTable.close")
-      .setTableName(tableName)
-      .setSpanKind(SpanKind.INTERNAL);
+    final Supplier<Span> supplier = new TableSpanBuilder(connection).setName("HTable.close")
+      .setTableName(tableName).setSpanKind(SpanKind.INTERNAL);
     TraceUtil.trace(() -> {
       if (this.closed) {
         return;
@ -1151,8 +1084,8 @@ public class HTable implements Table {
|
|||
}
|
||||
|
||||
/**
|
||||
* Explicitly clears the region cache to fetch the latest value from META.
|
||||
* This is a power user function: avoid unless you know the ramifications.
|
||||
* Explicitly clears the region cache to fetch the latest value from META. This is a power user
|
||||
* function: avoid unless you know the ramifications.
|
||||
*/
|
||||
public void clearRegionCache() {
|
||||
this.connection.clearRegionLocationCache();
|
||||
|
@ -1167,8 +1100,8 @@ public class HTable implements Table {
|
|||
public <T extends Service, R> Map<byte[], R> coprocessorService(final Class<T> service,
|
||||
byte[] startKey, byte[] endKey, final Batch.Call<T, R> callable)
|
||||
throws ServiceException, Throwable {
|
||||
final Map<byte[],R> results = Collections.synchronizedMap(
|
||||
new TreeMap<>(Bytes.BYTES_COMPARATOR));
|
||||
final Map<byte[], R> results =
|
||||
Collections.synchronizedMap(new TreeMap<>(Bytes.BYTES_COMPARATOR));
|
||||
coprocessorService(service, startKey, endKey, callable, (region, row, value) -> {
|
||||
if (region != null) {
|
||||
results.put(region, value);
|
||||
|
@ -1178,12 +1111,11 @@ public class HTable implements Table {
|
|||
}
|
||||
|
||||
@Override
|
||||
public <T extends Service, R> void coprocessorService(final Class<T> service,
|
||||
byte[] startKey, byte[] endKey, final Batch.Call<T,R> callable,
|
||||
final Batch.Callback<R> callback) throws ServiceException, Throwable {
|
||||
public <T extends Service, R> void coprocessorService(final Class<T> service, byte[] startKey,
|
||||
byte[] endKey, final Batch.Call<T, R> callable, final Batch.Callback<R> callback)
|
||||
throws ServiceException, Throwable {
|
||||
final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
|
||||
.setTableName(tableName)
|
||||
.setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC);
|
||||
.setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC);
|
||||
TraceUtil.trace(() -> {
|
||||
final Context context = Context.current();
|
||||
final ExecutorService wrappedPool = context.wrap(pool);
|
||||
|
@ -1220,8 +1152,7 @@ public class HTable implements Table {
|
|||
}, supplier);
|
||||
}
|
||||
|
||||
private List<byte[]> getStartKeysInRange(byte[] start, byte[] end)
|
||||
throws IOException {
|
||||
private List<byte[]> getStartKeysInRange(byte[] start, byte[] end) throws IOException {
|
||||
if (start == null) {
|
||||
start = HConstants.EMPTY_START_ROW;
|
||||
}
|
||||
|
@ -1307,10 +1238,10 @@ public class HTable implements Table {
|
|||
|
||||
@Override
|
||||
public <R extends Message> Map<byte[], R> batchCoprocessorService(
|
||||
Descriptors.MethodDescriptor methodDescriptor, Message request,
|
||||
byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable {
|
||||
final Map<byte[], R> results = Collections.synchronizedMap(new TreeMap<>(
|
||||
Bytes.BYTES_COMPARATOR));
|
||||
Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey,
|
||||
byte[] endKey, R responsePrototype) throws ServiceException, Throwable {
|
||||
final Map<byte[], R> results =
|
||||
Collections.synchronizedMap(new TreeMap<>(Bytes.BYTES_COMPARATOR));
|
||||
batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype,
|
||||
(region, row, result) -> {
|
||||
if (region != null) {
|
||||
|
@ -1322,18 +1253,16 @@ public class HTable implements Table {
|
|||
|
||||
@Override
|
||||
public <R extends Message> void batchCoprocessorService(
|
||||
final Descriptors.MethodDescriptor methodDescriptor, final Message request,
|
||||
byte[] startKey, byte[] endKey, final R responsePrototype, final Callback<R> callback)
|
||||
final Descriptors.MethodDescriptor methodDescriptor, final Message request, byte[] startKey,
|
||||
byte[] endKey, final R responsePrototype, final Callback<R> callback)
|
||||
throws ServiceException, Throwable {
|
||||
final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
|
||||
.setTableName(tableName)
|
||||
.setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC);
|
||||
.setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC);
|
||||
TraceUtil.trace(() -> {
|
||||
final Context context = Context.current();
|
||||
final byte[] sanitizedStartKey = Optional.ofNullable(startKey)
|
||||
.orElse(HConstants.EMPTY_START_ROW);
|
||||
final byte[] sanitizedEndKey = Optional.ofNullable(endKey)
|
||||
.orElse(HConstants.EMPTY_END_ROW);
|
||||
final byte[] sanitizedStartKey =
|
||||
Optional.ofNullable(startKey).orElse(HConstants.EMPTY_START_ROW);
|
||||
final byte[] sanitizedEndKey = Optional.ofNullable(endKey).orElse(HConstants.EMPTY_END_ROW);
|
||||
|
||||
// get regions covered by the row range
|
||||
Pair<List<byte[]>, List<HRegionLocation>> keysAndRegions =
|
||||
|
@ -1392,23 +1321,18 @@ public class HTable implements Table {
|
|||
}
|
||||
};
|
||||
AsyncProcessTask<ClientProtos.CoprocessorServiceResult> task =
|
||||
AsyncProcessTask.newBuilder(resultsCallback)
|
||||
.setPool(context.wrap(pool))
|
||||
.setTableName(tableName)
|
||||
.setRowAccess(execs)
|
||||
.setResults(results)
|
||||
.setRpcTimeout(readRpcTimeoutMs)
|
||||
.setOperationTimeout(operationTimeoutMs)
|
||||
.setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL)
|
||||
.build();
|
||||
AsyncProcessTask.newBuilder(resultsCallback).setPool(context.wrap(pool))
|
||||
.setTableName(tableName).setRowAccess(execs).setResults(results)
|
||||
.setRpcTimeout(readRpcTimeoutMs).setOperationTimeout(operationTimeoutMs)
|
||||
.setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).build();
|
||||
AsyncRequestFuture future = asyncProcess.submit(task);
|
||||
future.waitUntilDone();
|
||||
|
||||
if (future.hasError()) {
|
||||
throw future.getErrors();
|
||||
} else if (!callbackErrorExceptions.isEmpty()) {
|
||||
throw new RetriesExhaustedWithDetailsException(
|
||||
callbackErrorExceptions, callbackErrorActions, callbackErrorServers);
|
||||
throw new RetriesExhaustedWithDetailsException(callbackErrorExceptions,
|
||||
callbackErrorActions, callbackErrorServers);
|
||||
}
|
||||
}, supplier);
|
||||
}
|
||||
|
@ -1434,8 +1358,8 @@ public class HTable implements Table {
|
|||
|
||||
@Override
|
||||
public CheckAndMutateBuilder qualifier(byte[] qualifier) {
|
||||
this.qualifier = Preconditions.checkNotNull(qualifier, "qualifier is null. Consider using" +
|
||||
" an empty byte array, or just do not call this method if you want a null qualifier");
|
||||
this.qualifier = Preconditions.checkNotNull(qualifier, "qualifier is null. Consider using"
|
||||
+ " an empty byte array, or just do not call this method if you want a null qualifier");
|
||||
return this;
|
||||
}
|
||||
|
||||
|
@ -1460,15 +1384,14 @@ public class HTable implements Table {
|
|||
}
|
||||
|
||||
private void preCheck() {
|
||||
Preconditions.checkNotNull(op, "condition is null. You need to specify the condition by" +
|
||||
" calling ifNotExists/ifEquals/ifMatches before executing the request");
|
||||
Preconditions.checkNotNull(op, "condition is null. You need to specify the condition by"
|
||||
+ " calling ifNotExists/ifEquals/ifMatches before executing the request");
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean thenPut(Put put) throws IOException {
|
||||
final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
|
||||
.setTableName(tableName)
|
||||
.setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE);
|
||||
.setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE);
|
||||
return TraceUtil.trace(() -> {
|
||||
validatePut(put);
|
||||
preCheck();
|
||||
|
@ -1480,8 +1403,7 @@ public class HTable implements Table {
|
|||
@Override
|
||||
public boolean thenDelete(Delete delete) throws IOException {
|
||||
final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
|
||||
.setTableName(tableName)
|
||||
.setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE);
|
||||
.setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE);
|
||||
return TraceUtil.trace(() -> {
|
||||
preCheck();
|
||||
return doCheckAndMutate(row, family, qualifier, op, value, null, timeRange, delete)
|
||||
|
@ -1492,8 +1414,7 @@ public class HTable implements Table {
|
|||
@Override
|
||||
public boolean thenMutate(RowMutations mutation) throws IOException {
|
||||
final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
|
||||
.setTableName(tableName)
|
||||
.setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE);
|
||||
.setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE);
|
||||
return TraceUtil.trace(() -> {
|
||||
preCheck();
|
||||
return doCheckAndMutate(row, family, qualifier, op, value, null, timeRange, mutation)
|
||||
|
@ -1522,20 +1443,17 @@ public class HTable implements Table {
|
|||
@Override
|
||||
public boolean thenPut(Put put) throws IOException {
|
||||
final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
|
||||
.setTableName(tableName)
|
||||
.setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE);
|
||||
.setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE);
|
||||
return TraceUtil.trace(() -> {
|
||||
validatePut(put);
|
||||
return doCheckAndMutate(row, null, null, null, null, filter, timeRange, put)
|
||||
.isSuccess();
|
||||
return doCheckAndMutate(row, null, null, null, null, filter, timeRange, put).isSuccess();
|
||||
}, supplier);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean thenDelete(Delete delete) throws IOException {
|
||||
final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
|
||||
.setTableName(tableName)
|
||||
.setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE);
|
||||
.setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE);
|
||||
return TraceUtil.trace(
|
||||
() -> doCheckAndMutate(row, null, null, null, null, filter, timeRange, delete).isSuccess(),
|
||||
supplier);
|
||||
|
@ -1544,10 +1462,9 @@ public class HTable implements Table {
|
|||
@Override
|
||||
public boolean thenMutate(RowMutations mutation) throws IOException {
|
||||
final Supplier<Span> supplier = new TableOperationSpanBuilder(connection)
|
||||
.setTableName(tableName)
|
||||
.setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE);
|
||||
return TraceUtil.trace(
|
||||
() -> doCheckAndMutate(row, null, null, null, null, filter, timeRange, mutation)
|
||||
.setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE);
|
||||
return TraceUtil
|
||||
.trace(() -> doCheckAndMutate(row, null, null, null, null, filter, timeRange, mutation)
|
||||
.isSuccess(),
|
||||
supplier);
|
||||
}
|
||||
|
|
|
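The HTable hunks above are formatting-only, but they make the synchronous tracing pattern easy to see: build a lazy `Supplier<Span>`, then run the operation body inside `TraceUtil.trace`, which starts the span, marks failures, and always ends it. A minimal, self-contained sketch of that shape using only the opentelemetry-api dependency follows; the `trace` helper here is a hypothetical stand-in for HBase's internal `TraceUtil.trace(Callable, Supplier<Span>)`, and the tracer/span names are illustrative, not taken from the patch.

import io.opentelemetry.api.GlobalOpenTelemetry;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.api.trace.StatusCode;
import io.opentelemetry.context.Scope;
import java.util.concurrent.Callable;
import java.util.function.Supplier;

public final class TraceSketch {
  // Hypothetical stand-in for TraceUtil.trace(Callable, Supplier<Span>):
  // start the span, make it current, set ERROR on failure, always end it.
  static <T> T trace(Callable<T> action, Supplier<Span> spanSupplier) throws Exception {
    Span span = spanSupplier.get();
    try (Scope ignored = span.makeCurrent()) {
      T result = action.call();
      span.setStatus(StatusCode.OK);
      return result;
    } catch (Exception e) {
      span.setStatus(StatusCode.ERROR);
      span.recordException(e);
      throw e;
    } finally {
      span.end();
    }
  }

  public static void main(String[] args) throws Exception {
    // The supplier is lazy, mirroring the "final Supplier<Span> supplier = ..." lines
    // in the diff; without an SDK configured this yields a no-op span but still runs.
    Supplier<Span> supplier = () -> GlobalOpenTelemetry.getTracer("example")
      .spanBuilder("CHECK_AND_MUTATE demo:table").setSpanKind(SpanKind.CLIENT).startSpan();
    boolean ok = trace(() -> Boolean.TRUE, supplier);
    System.out.println(ok);
  }
}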
@@ -26,6 +26,7 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.validatePutsInRowMutations;
 import static org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture;
 import static org.apache.hadoop.hbase.trace.TraceUtil.tracedFutures;
 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;

 import com.google.protobuf.RpcChannel;
 import io.opentelemetry.api.trace.Span;
+import io.opentelemetry.api.trace.StatusCode;
@@ -58,9 +59,11 @@ import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
 import org.apache.hbase.thirdparty.io.netty.util.Timer;

 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
@@ -127,8 +130,8 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
     this.pauseNs = builder.pauseNs;
     if (builder.pauseForCQTBENs < builder.pauseNs) {
       LOG.warn(
-        "Configured value of pauseForCQTBENs is {} ms, which is less than" +
-          " the normal pause value {} ms, use the greater one instead",
+        "Configured value of pauseForCQTBENs is {} ms, which is less than"
+          + " the normal pause value {} ms, use the greater one instead",
         TimeUnit.NANOSECONDS.toMillis(builder.pauseForCQTBENs),
         TimeUnit.NANOSECONDS.toMillis(builder.pauseNs));
       this.pauseForCQTBENs = builder.pauseNs;
@@ -137,8 +140,8 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
     }
     this.maxAttempts = builder.maxAttempts;
     this.startLogErrorsCnt = builder.startLogErrorsCnt;
-    this.defaultScannerCaching = tableName.isSystemTable() ? conn.connConf.getMetaScannerCaching() :
-      conn.connConf.getScannerCaching();
+    this.defaultScannerCaching = tableName.isSystemTable() ? conn.connConf.getMetaScannerCaching()
+      : conn.connConf.getScannerCaching();
     this.defaultScannerMaxResultSize = conn.connConf.getScannerMaxResultSize();
   }

@@ -266,8 +269,7 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {

   @Override
   public CompletableFuture<Result> get(Get get) {
-    final Supplier<Span> supplier = newTableOperationSpanBuilder()
-      .setOperation(get);
+    final Supplier<Span> supplier = newTableOperationSpanBuilder().setOperation(get);
     return tracedFuture(
       () -> timelineConsistentRead(conn.getLocator(), tableName, get, get.getRow(),
         RegionLocateType.CURRENT, replicaId -> get(get, replicaId), readRpcTimeoutNs,
@@ -278,20 +280,18 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
   @Override
   public CompletableFuture<Void> put(Put put) {
     validatePut(put, conn.connConf.getMaxKeyValueSize());
-    final Supplier<Span> supplier = newTableOperationSpanBuilder()
-      .setOperation(put);
+    final Supplier<Span> supplier = newTableOperationSpanBuilder().setOperation(put);
     return tracedFuture(() -> this.<Void, Put> newCaller(put, writeRpcTimeoutNs)
       .action((controller, loc, stub) -> RawAsyncTableImpl.<Put> voidMutate(controller, loc, stub,
         put, RequestConverter::buildMutateRequest))
-      .call(), supplier);
+      .call(),
+      supplier);
   }

   @Override
   public CompletableFuture<Void> delete(Delete delete) {
-    final Supplier<Span> supplier = newTableOperationSpanBuilder()
-      .setOperation(delete);
-    return tracedFuture(
-      () -> this.<Void, Delete> newCaller(delete, writeRpcTimeoutNs)
+    final Supplier<Span> supplier = newTableOperationSpanBuilder().setOperation(delete);
+    return tracedFuture(() -> this.<Void, Delete> newCaller(delete, writeRpcTimeoutNs)
       .action((controller, loc, stub) -> RawAsyncTableImpl.<Delete> voidMutate(controller, loc,
         stub, delete, RequestConverter::buildMutateRequest))
       .call(),
@@ -301,8 +301,7 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
   @Override
   public CompletableFuture<Result> append(Append append) {
     checkHasFamilies(append);
-    final Supplier<Span> supplier = newTableOperationSpanBuilder()
-      .setOperation(append);
+    final Supplier<Span> supplier = newTableOperationSpanBuilder().setOperation(append);
     return tracedFuture(() -> {
       long nonceGroup = conn.getNonceGenerator().getNonceGroup();
       long nonce = conn.getNonceGenerator().newNonce();
@@ -317,14 +316,13 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
   @Override
   public CompletableFuture<Result> increment(Increment increment) {
     checkHasFamilies(increment);
-    final Supplier<Span> supplier = newTableOperationSpanBuilder()
-      .setOperation(increment);
+    final Supplier<Span> supplier = newTableOperationSpanBuilder().setOperation(increment);
     return tracedFuture(() -> {
       long nonceGroup = conn.getNonceGenerator().getNonceGroup();
       long nonce = conn.getNonceGenerator().newNonce();
       return this.<Result, Increment> newCaller(increment, rpcTimeoutNs)
-        .action((controller, loc, stub) -> this.<Increment, Result> noncedMutate(nonceGroup, nonce,
-          controller, loc, stub, increment, RequestConverter::buildMutateRequest,
+        .action((controller, loc, stub) -> this.<Increment, Result> noncedMutate(nonceGroup,
+          nonce, controller, loc, stub, increment, RequestConverter::buildMutateRequest,
           RawAsyncTableImpl::toResult))
         .call();
     }, supplier);
@@ -351,8 +349,8 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {

     @Override
     public CheckAndMutateBuilder qualifier(byte[] qualifier) {
-      this.qualifier = Preconditions.checkNotNull(qualifier, "qualifier is null. Consider using" +
-        " an empty byte array, or just do not call this method if you want a null qualifier");
+      this.qualifier = Preconditions.checkNotNull(qualifier, "qualifier is null. Consider using"
+        + " an empty byte array, or just do not call this method if you want a null qualifier");
       return this;
     }

@@ -377,8 +375,8 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
     }

     private void preCheck() {
-      Preconditions.checkNotNull(op, "condition is null. You need to specify the condition by" +
-        " calling ifNotExists/ifEquals/ifMatches before executing the request");
+      Preconditions.checkNotNull(op, "condition is null. You need to specify the condition by"
+        + " calling ifNotExists/ifEquals/ifMatches before executing the request");
     }

     @Override
@@ -404,8 +402,8 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
       final Supplier<Span> supplier = newTableOperationSpanBuilder()
         .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
         .setContainerOperations(delete);
-      return tracedFuture(
-        () -> RawAsyncTableImpl.this.<Boolean> newCaller(row, delete.getPriority(), rpcTimeoutNs)
+      return tracedFuture(() -> RawAsyncTableImpl.this
+        .<Boolean> newCaller(row, delete.getPriority(), rpcTimeoutNs)
         .action((controller, loc, stub) -> RawAsyncTableImpl.mutate(controller, loc, stub, delete,
           (rn, d) -> RequestConverter.buildMutateRequest(rn, row, family, qualifier, op, value,
             null, timeRange, d, HConstants.NO_NONCE, HConstants.NO_NONCE),
@@ -421,8 +419,7 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
       final Supplier<Span> supplier = newTableOperationSpanBuilder()
         .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
         .setContainerOperations(mutations);
-      return tracedFuture(
-        () -> RawAsyncTableImpl.this
+      return tracedFuture(() -> RawAsyncTableImpl.this
         .<Boolean> newCaller(row, mutations.getMaxPriority(), rpcTimeoutNs)
         .action((controller, loc, stub) -> RawAsyncTableImpl.this.mutateRow(controller, loc, stub,
           mutations,
@@ -465,12 +462,11 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
       final Supplier<Span> supplier = newTableOperationSpanBuilder()
         .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
         .setContainerOperations(put);
-      return tracedFuture(
-        () -> RawAsyncTableImpl.this.<Boolean> newCaller(row, put.getPriority(), rpcTimeoutNs)
-          .action((controller, loc, stub) -> RawAsyncTableImpl.mutate(controller, loc,
-            stub, put,
-            (rn, p) -> RequestConverter.buildMutateRequest(rn, row, null, null, null, null,
-              filter, timeRange, p, HConstants.NO_NONCE, HConstants.NO_NONCE),
+      return tracedFuture(() -> RawAsyncTableImpl.this
+        .<Boolean> newCaller(row, put.getPriority(), rpcTimeoutNs)
+        .action((controller, loc, stub) -> RawAsyncTableImpl.mutate(controller, loc, stub, put,
+          (rn, p) -> RequestConverter.buildMutateRequest(rn, row, null, null, null, null, filter,
+            timeRange, p, HConstants.NO_NONCE, HConstants.NO_NONCE),
           (c, r) -> r.getProcessed()))
         .call(),
         supplier);
@@ -481,8 +477,8 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
       final Supplier<Span> supplier = newTableOperationSpanBuilder()
         .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
         .setContainerOperations(delete);
-      return tracedFuture(
-        () -> RawAsyncTableImpl.this.<Boolean> newCaller(row, delete.getPriority(), rpcTimeoutNs)
+      return tracedFuture(() -> RawAsyncTableImpl.this
+        .<Boolean> newCaller(row, delete.getPriority(), rpcTimeoutNs)
         .action((controller, loc, stub) -> RawAsyncTableImpl.mutate(controller, loc, stub, delete,
           (rn, d) -> RequestConverter.buildMutateRequest(rn, row, null, null, null, null, filter,
             timeRange, d, HConstants.NO_NONCE, HConstants.NO_NONCE),
@@ -497,8 +493,7 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
       final Supplier<Span> supplier = newTableOperationSpanBuilder()
         .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE)
         .setContainerOperations(mutations);
-      return tracedFuture(
-        () -> RawAsyncTableImpl.this
+      return tracedFuture(() -> RawAsyncTableImpl.this
         .<Boolean> newCaller(row, mutations.getMaxPriority(), rpcTimeoutNs)
         .action((controller, loc, stub) -> RawAsyncTableImpl.this.mutateRow(controller, loc, stub,
           mutations,
@@ -517,14 +512,12 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {

   @Override
   public CompletableFuture<CheckAndMutateResult> checkAndMutate(CheckAndMutate checkAndMutate) {
-    final Supplier<Span> supplier = newTableOperationSpanBuilder()
-      .setOperation(checkAndMutate)
+    final Supplier<Span> supplier = newTableOperationSpanBuilder().setOperation(checkAndMutate)
       .setContainerOperations(checkAndMutate.getAction());
     return tracedFuture(() -> {
-      if (checkAndMutate.getAction() instanceof Put ||
-        checkAndMutate.getAction() instanceof Delete ||
-        checkAndMutate.getAction() instanceof Increment ||
-        checkAndMutate.getAction() instanceof Append) {
+      if (checkAndMutate.getAction() instanceof Put || checkAndMutate.getAction() instanceof Delete
+        || checkAndMutate.getAction() instanceof Increment
+        || checkAndMutate.getAction() instanceof Append) {
         Mutation mutation = (Mutation) checkAndMutate.getAction();
         if (mutation instanceof Put) {
           validatePut((Put) mutation, conn.connConf.getMaxKeyValueSize());
@@ -548,15 +541,16 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
         long nonceGroup = conn.getNonceGenerator().getNonceGroup();
         long nonce = conn.getNonceGenerator().newNonce();
         return RawAsyncTableImpl.this
-          .<CheckAndMutateResult> newCaller(checkAndMutate.getRow(), rowMutations.getMaxPriority(),
-            rpcTimeoutNs)
+          .<CheckAndMutateResult> newCaller(checkAndMutate.getRow(),
+            rowMutations.getMaxPriority(), rpcTimeoutNs)
           .action((controller, loc, stub) -> RawAsyncTableImpl.this
             .<CheckAndMutateResult, CheckAndMutateResult> mutateRow(controller, loc, stub,
               rowMutations,
               (rn, rm) -> RequestConverter.buildMultiRequest(rn, checkAndMutate.getRow(),
                 checkAndMutate.getFamily(), checkAndMutate.getQualifier(),
                 checkAndMutate.getCompareOp(), checkAndMutate.getValue(),
-                checkAndMutate.getFilter(), checkAndMutate.getTimeRange(), rm, nonceGroup, nonce),
+                checkAndMutate.getFilter(), checkAndMutate.getTimeRange(), rm, nonceGroup,
+                nonce),
               resp -> resp))
           .call();
       } else {
@@ -571,11 +565,9 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
   @Override
   public List<CompletableFuture<CheckAndMutateResult>>
     checkAndMutate(List<CheckAndMutate> checkAndMutates) {
-    final Supplier<Span> supplier = newTableOperationSpanBuilder()
-      .setOperation(checkAndMutates)
+    final Supplier<Span> supplier = newTableOperationSpanBuilder().setOperation(checkAndMutates)
       .setContainerOperations(checkAndMutates);
-    return tracedFutures(
-      () -> batch(checkAndMutates, rpcTimeoutNs).stream()
+    return tracedFutures(() -> batch(checkAndMutates, rpcTimeoutNs).stream()
       .map(f -> f.thenApply(r -> (CheckAndMutateResult) r)).collect(toList()),
       supplier);
   }
@@ -604,9 +596,11 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
           loc.getServerName(), multiResp);
         Throwable ex = multiResp.getException(regionName);
         if (ex != null) {
-          future.completeExceptionally(ex instanceof IOException ? ex :
-            new IOException(
-              "Failed to mutate row: " + Bytes.toStringBinary(mutation.getRow()), ex));
+          future
+            .completeExceptionally(ex instanceof IOException ? ex
+              : new IOException(
+                "Failed to mutate row: " + Bytes.toStringBinary(mutation.getRow()),
+                ex));
         } else {
           future.complete(
             respConverter.apply((RES) multiResp.getResults().get(regionName).result.get(0)));
@@ -628,11 +622,9 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
     validatePutsInRowMutations(mutations, conn.connConf.getMaxKeyValueSize());
     long nonceGroup = conn.getNonceGenerator().getNonceGroup();
     long nonce = conn.getNonceGenerator().newNonce();
-    final Supplier<Span> supplier = newTableOperationSpanBuilder()
-      .setOperation(mutations)
-      .setContainerOperations(mutations);
-    return tracedFuture(
-      () -> this
+    final Supplier<Span> supplier =
+      newTableOperationSpanBuilder().setOperation(mutations).setContainerOperations(mutations);
+    return tracedFuture(() -> this
       .<Result> newCaller(mutations.getRow(), mutations.getMaxPriority(), writeRpcTimeoutNs)
       .action((controller, loc, stub) -> this.<Result, Result> mutateRow(controller, loc, stub,
         mutations, (rn, rm) -> RequestConverter.buildMultiRequest(rn, rm, nonceGroup, nonce),
@@ -678,9 +670,6 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {

   @Override
   public CompletableFuture<List<Result>> scanAll(Scan scan) {
-    final Supplier<Span> supplier = newTableOperationSpanBuilder()
-      .setOperation(scan);
-    return tracedFuture(() -> {
     CompletableFuture<List<Result>> future = new CompletableFuture<>();
     List<Result> scanResults = new ArrayList<>();
     scan(scan, new AdvancedScanResultConsumer() {
@@ -701,38 +690,33 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
       }
     });
     return future;
-    }, supplier);
   }

   @Override
   public List<CompletableFuture<Result>> get(List<Get> gets) {
-    final Supplier<Span> supplier = newTableOperationSpanBuilder()
-      .setOperation(gets)
+    final Supplier<Span> supplier = newTableOperationSpanBuilder().setOperation(gets)
       .setContainerOperations(HBaseSemanticAttributes.Operation.GET);
     return tracedFutures(() -> batch(gets, readRpcTimeoutNs), supplier);
   }

   @Override
   public List<CompletableFuture<Void>> put(List<Put> puts) {
-    final Supplier<Span> supplier = newTableOperationSpanBuilder()
-      .setOperation(puts)
+    final Supplier<Span> supplier = newTableOperationSpanBuilder().setOperation(puts)
       .setContainerOperations(HBaseSemanticAttributes.Operation.PUT);
     return tracedFutures(() -> voidMutate(puts), supplier);
   }

   @Override
   public List<CompletableFuture<Void>> delete(List<Delete> deletes) {
-    final Supplier<Span> supplier = newTableOperationSpanBuilder()
-      .setOperation(deletes)
+    final Supplier<Span> supplier = newTableOperationSpanBuilder().setOperation(deletes)
       .setContainerOperations(HBaseSemanticAttributes.Operation.DELETE);
     return tracedFutures(() -> voidMutate(deletes), supplier);
   }

   @Override
   public <T> List<CompletableFuture<T>> batch(List<? extends Row> actions) {
-    final Supplier<Span> supplier = newTableOperationSpanBuilder()
-      .setOperation(actions)
-      .setContainerOperations(actions);
+    final Supplier<Span> supplier =
+      newTableOperationSpanBuilder().setOperation(actions).setContainerOperations(actions);
     return tracedFutures(() -> batch(actions, rpcTimeoutNs), supplier);
   }

@@ -853,10 +837,8 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
     if (locateFinished(region, endKey, endKeyInclusive)) {
       locateFinished.set(true);
     } else {
-      addListener(
-        conn.getLocator().getRegionLocation(tableName, region.getEndKey(), RegionLocateType.CURRENT,
-          operationTimeoutNs),
-        (l, e) -> {
+      addListener(conn.getLocator().getRegionLocation(tableName, region.getEndKey(),
+        RegionLocateType.CURRENT, operationTimeoutNs), (l, e) -> {
           try (Scope ignored = span.makeCurrent()) {
             onLocateComplete(stubMaker, callable, callback, locs, endKey, endKeyInclusive,
               locateFinished, unfinishedRequest, l, e);
@@ -904,9 +886,9 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
     @Override
     public CoprocessorServiceBuilderImpl<S, R> fromRow(byte[] startKey, boolean inclusive) {
       this.startKey = Preconditions.checkNotNull(startKey,
-        "startKey is null. Consider using" +
-          " an empty byte array, or just do not call this method if you want to start selection" +
-          " from the first region");
+        "startKey is null. Consider using"
+          + " an empty byte array, or just do not call this method if you want to start selection"
+          + " from the first region");
       this.startKeyInclusive = inclusive;
       return this;
     }
@@ -914,9 +896,9 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
     @Override
     public CoprocessorServiceBuilderImpl<S, R> toRow(byte[] endKey, boolean inclusive) {
       this.endKey = Preconditions.checkNotNull(endKey,
-        "endKey is null. Consider using" +
-          " an empty byte array, or just do not call this method if you want to continue" +
-          " selection to the last region");
+        "endKey is null. Consider using"
+          + " an empty byte array, or just do not call this method if you want to continue"
+          + " selection to the last region");
       this.endKeyInclusive = inclusive;
       return this;
     }
@@ -924,12 +906,10 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
     @Override
     public void execute() {
       final Span span = newTableOperationSpanBuilder()
-        .setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC)
-        .build();
+        .setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC).build();
       try (Scope ignored = span.makeCurrent()) {
-        final RegionLocateType regionLocateType = startKeyInclusive
-          ? RegionLocateType.CURRENT
-          : RegionLocateType.AFTER;
+        final RegionLocateType regionLocateType =
+          startKeyInclusive ? RegionLocateType.CURRENT : RegionLocateType.AFTER;
         final CompletableFuture<HRegionLocation> future = conn.getLocator()
           .getRegionLocation(tableName, startKey, regionLocateType, operationTimeoutNs);
         addListener(future, (loc, error) -> {
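Note how the async client differs from HTable: the span cannot end when the method returns, because the work is still in flight. The `tracedFuture` helper imported at the top of RawAsyncTableImpl ends the span only when the returned future completes, and the scanAll hunks above remove that wrapper entirely because the SCAN span now lives in AsyncClientScanner. A runnable sketch of the future-completion idea, assuming only opentelemetry-api on the classpath; `tracedFuture` here is a hypothetical stand-in for HBase's `TraceUtil.tracedFuture`, and the span name is illustrative:

import io.opentelemetry.api.GlobalOpenTelemetry;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.StatusCode;
import io.opentelemetry.context.Scope;
import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;

public final class TracedFutureSketch {
  // Hypothetical stand-in for TraceUtil.tracedFuture: the span must outlive the
  // method call and only end when the returned future completes.
  static <T> CompletableFuture<T> tracedFuture(Supplier<CompletableFuture<T>> action,
    Supplier<Span> spanSupplier) {
    Span span = spanSupplier.get();
    try (Scope ignored = span.makeCurrent()) {
      CompletableFuture<T> future = action.get();
      future.whenComplete((result, error) -> {
        if (error != null) {
          span.setStatus(StatusCode.ERROR);
          span.recordException(error);
        } else {
          span.setStatus(StatusCode.OK);
        }
        span.end();
      });
      return future;
    }
  }

  public static void main(String[] args) {
    Supplier<Span> supplier = () -> GlobalOpenTelemetry.getTracer("example")
      .spanBuilder("PUT demo:table").startSpan();
    tracedFuture(() -> CompletableFuture.completedFuture("done"), supplier)
      .thenAccept(System.out::println).join();
  }
}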
@@ -1,5 +1,4 @@
-/**
- *
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -24,9 +23,8 @@ import java.io.UncheckedIOException;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
-
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+import org.apache.yetus.audience.InterfaceAudience;

 /**
  * Interface for client-side scanning. Go to {@link Table} to obtain instances.
@@ -50,7 +48,8 @@ public interface ResultScanner extends Closeable, Iterable<Result> {
           return true;
         }
         try {
-          return (next = ResultScanner.this.next()) != null;
+          next = ResultScanner.this.next();
+          return next != null;
         } catch (IOException e) {
           throw new UncheckedIOException(e);
         }
@@ -89,7 +88,6 @@ public interface ResultScanner extends Closeable, Iterable<Result> {
    * @param nbRows number of rows to return
    * @return Between zero and nbRows rowResults. Scan is done if returned array is of zero-length
    *         (We never return null).
-   * @throws IOException
    */
   default Result[] next(int nbRows) throws IOException {
     List<Result> resultSets = new ArrayList<>(nbRows);
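The iterator hunk above splits the old assignment-in-expression into two statements with no behavior change: hasNext() fetches and caches one element, next() hands it out. A self-contained sketch of that look-ahead iterator shape, simplified to String elements (the real code returns Result and converts IOException to UncheckedIOException):

import java.util.Arrays;
import java.util.Iterator;

final class OneAheadIterator implements Iterator<String> {
  private final Iterator<String> source;
  private String next = null;

  OneAheadIterator(Iterator<String> source) {
    this.source = source;
  }

  @Override
  public boolean hasNext() {
    if (next != null) {
      return true;
    }
    // Same effect as the old "(next = scanner.next()) != null" one-liner,
    // written as two statements as in the patch.
    next = source.hasNext() ? source.next() : null;
    return next != null;
  }

  @Override
  public String next() {
    String ret = next;
    next = null;
    return ret;
  }

  public static void main(String[] args) {
    Iterator<String> it = new OneAheadIterator(Arrays.asList("a", "b").iterator());
    while (it.hasNext()) {
      System.out.println(it.next());
    }
  }
}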
@@ -28,6 +28,8 @@ import static org.apache.hadoop.hbase.client.trace.hamcrest.TraceTestUtil.buildTableAttributesMatcher;
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.Matchers.allOf;
 import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.hasItem;
 import static org.hamcrest.Matchers.hasSize;
 import static org.junit.Assert.fail;
@@ -36,6 +38,7 @@ import static org.mockito.ArgumentMatchers.anyInt;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
+
 import io.opentelemetry.api.trace.SpanKind;
 import io.opentelemetry.api.trace.StatusCode;
 import io.opentelemetry.sdk.testing.junit4.OpenTelemetryRule;
@@ -44,8 +47,10 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ForkJoinPool;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
@@ -75,8 +80,10 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;

 import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;

 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
@@ -105,7 +112,7 @@ public class TestAsyncTableTracing {

   private AsyncConnectionImpl conn;

-  private AsyncTable<?> table;
+  private AsyncTable<ScanResultConsumer> table;

   @Rule
   public OpenTelemetryRule traceRule = OpenTelemetryRule.create();
@@ -175,9 +182,9 @@ public class TestAsyncTableTracing {
         case INCREMENT:
           ColumnValue value = req.getColumnValue(0);
           QualifierValue qvalue = value.getQualifierValue(0);
-          Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
-            .setType(Cell.Type.Put).setRow(req.getRow().toByteArray())
-            .setFamily(value.getFamily().toByteArray())
+          Cell cell =
+            CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Cell.Type.Put)
+              .setRow(req.getRow().toByteArray()).setFamily(value.getFamily().toByteArray())
               .setQualifier(qvalue.getQualifier().toByteArray())
               .setValue(qvalue.getValue().toByteArray()).build();
           resp = MutateResponse.newBuilder()
@@ -202,8 +209,7 @@ public class TestAsyncTableTracing {
       }
     }).when(stub).get(any(HBaseRpcController.class), any(GetRequest.class), any());
     final User user = UserProvider.instantiate(CONF).getCurrent();
-    conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF), "test",
-      user) {
+    conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF), "test", user) {

       @Override
       AsyncRegionLocator getLocator() {
@@ -249,26 +255,19 @@ public class TestAsyncTableTracing {
     // n.b. this method implementation must match the one of the same name found in
     // TestHTableTracing
     final TableName tableName = table.getName();
-    final Matcher<SpanData> spanLocator = allOf(
-      hasName(containsString(tableOperation)), hasEnded());
+    final Matcher<SpanData> spanLocator =
+      allOf(hasName(containsString(tableOperation)), hasEnded());
     final String expectedName = tableOperation + " " + tableName.getNameWithNamespaceInclAsString();

-    Waiter.waitFor(CONF, 1000, new MatcherPredicate<>(
-      "waiting for span to emit",
+    Waiter.waitFor(CONF, 1000, new MatcherPredicate<>("waiting for span to emit",
       () -> traceRule.getSpans(), hasItem(spanLocator)));
-    List<SpanData> candidateSpans = traceRule.getSpans()
-      .stream()
-      .filter(spanLocator::matches)
-      .collect(Collectors.toList());
+    List<SpanData> candidateSpans =
+      traceRule.getSpans().stream().filter(spanLocator::matches).collect(Collectors.toList());
     assertThat(candidateSpans, hasSize(1));
     SpanData data = candidateSpans.iterator().next();
-    assertThat(data, allOf(
-      hasName(expectedName),
-      hasKind(SpanKind.CLIENT),
-      hasStatusWithCode(StatusCode.OK),
-      buildConnectionAttributesMatcher(conn),
-      buildTableAttributesMatcher(tableName),
-      matcher));
+    assertThat(data,
+      allOf(hasName(expectedName), hasKind(SpanKind.CLIENT), hasStatusWithCode(StatusCode.OK),
+        buildConnectionAttributesMatcher(conn), buildTableAttributesMatcher(tableName), matcher));
   }

   @Test
@@ -341,9 +340,9 @@ public class TestAsyncTableTracing {
         .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))
         .build(new Delete(Bytes.toBytes(0))))).toArray(new CompletableFuture[0]))
       .join();
-    assertTrace("BATCH", hasAttributes(
-      containsEntryWithStringValuesOf(
-        "db.hbase.container_operations", "CHECK_AND_MUTATE", "DELETE")));
+    assertTrace("BATCH",
+      hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations",
+        "CHECK_AND_MUTATE", "DELETE")));
   }

   @Test
@@ -351,15 +350,14 @@ public class TestAsyncTableTracing {
     table.checkAndMutateAll(Arrays.asList(CheckAndMutate.newBuilder(Bytes.toBytes(0))
       .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))
       .build(new Delete(Bytes.toBytes(0))))).join();
-    assertTrace("BATCH", hasAttributes(
-      containsEntryWithStringValuesOf(
-        "db.hbase.container_operations", "CHECK_AND_MUTATE", "DELETE")));
+    assertTrace("BATCH",
+      hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations",
+        "CHECK_AND_MUTATE", "DELETE")));
   }

   private void testCheckAndMutateBuilder(Row op) {
     AsyncTable.CheckAndMutateBuilder builder =
-      table.checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf"))
-        .qualifier(Bytes.toBytes("cq"))
+      table.checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq"))
         .ifEquals(Bytes.toBytes("v"));
     if (op instanceof Put) {
       Put put = (Put) op;
@@ -378,8 +376,8 @@ public class TestAsyncTableTracing {

   @Test
   public void testCheckAndMutateBuilderThenPut() {
-    Put put = new Put(Bytes.toBytes(0))
-      .addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), Bytes.toBytes("v"));
+    Put put = new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"),
+      Bytes.toBytes("v"));
     testCheckAndMutateBuilder(put);
   }

@@ -390,9 +388,10 @@ public class TestAsyncTableTracing {

   @Test
   public void testCheckAndMutateBuilderThenMutations() throws IOException {
-    RowMutations mutations = new RowMutations(Bytes.toBytes(0))
-      .add((Mutation) (new Put(Bytes.toBytes(0))
-        .addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), Bytes.toBytes("v"))))
+    RowMutations mutations =
+      new RowMutations(Bytes.toBytes(0))
+        .add((Mutation) (new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("f"),
+          Bytes.toBytes("cq"), Bytes.toBytes("v"))))
         .add((Mutation) new Delete(Bytes.toBytes(0)));
     testCheckAndMutateBuilder(mutations);
   }
@@ -418,8 +417,8 @@ public class TestAsyncTableTracing {

   @Test
   public void testCheckAndMutateWithFilterBuilderThenPut() {
-    Put put = new Put(Bytes.toBytes(0))
-      .addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), Bytes.toBytes("v"));
+    Put put = new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"),
+      Bytes.toBytes("v"));
     testCheckAndMutateWithFilterBuilder(put);
   }

@@ -430,18 +429,20 @@ public class TestAsyncTableTracing {

   @Test
   public void testCheckAndMutateWithFilterBuilderThenMutations() throws IOException {
-    RowMutations mutations = new RowMutations(Bytes.toBytes(0))
-      .add((Mutation) new Put(Bytes.toBytes(0))
-        .addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), Bytes.toBytes("v")))
+    RowMutations mutations =
+      new RowMutations(Bytes.toBytes(0))
+        .add((Mutation) new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("f"),
+          Bytes.toBytes("cq"), Bytes.toBytes("v")))
        .add((Mutation) new Delete(Bytes.toBytes(0)));
     testCheckAndMutateWithFilterBuilder(mutations);
   }

   @Test
   public void testMutateRow() throws IOException {
-    final RowMutations mutations = new RowMutations(Bytes.toBytes(0))
-      .add((Mutation) new Put(Bytes.toBytes(0))
-        .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")))
+    final RowMutations mutations =
+      new RowMutations(Bytes.toBytes(0))
+        .add((Mutation) new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"),
+          Bytes.toBytes("cq"), Bytes.toBytes("v")))
        .add((Mutation) new Delete(Bytes.toBytes(0)));
     table.mutateRow(mutations).join();
     assertTrace("BATCH", hasAttributes(
@@ -454,37 +455,88 @@ public class TestAsyncTableTracing {
     assertTrace("SCAN");
   }

+  @Test
+  public void testScan() throws Throwable {
+    final CountDownLatch doneSignal = new CountDownLatch(1);
+    final AtomicInteger count = new AtomicInteger();
+    final AtomicReference<Throwable> throwable = new AtomicReference<>();
+    final Scan scan = new Scan().setCaching(1).setMaxResultSize(1).setLimit(1);
+    table.scan(scan, new ScanResultConsumer() {
+      @Override
+      public boolean onNext(Result result) {
+        if (result.getRow() != null) {
+          count.incrementAndGet();
+        }
+        return true;
+      }
+
+      @Override
+      public void onError(Throwable error) {
+        throwable.set(error);
+        doneSignal.countDown();
+      }
+
+      @Override
+      public void onComplete() {
+        doneSignal.countDown();
+      }
+    });
+    doneSignal.await();
+    if (throwable.get() != null) {
+      throw throwable.get();
+    }
+    assertThat("user code did not run. check test setup.", count.get(), greaterThan(0));
+    assertTrace("SCAN");
+  }
+
+  @Test
+  public void testGetScanner() {
+    final Scan scan = new Scan().setCaching(1).setMaxResultSize(1).setLimit(1);
+    try (ResultScanner scanner = table.getScanner(scan)) {
+      int count = 0;
+      for (Result result : scanner) {
+        if (result.getRow() != null) {
+          count++;
+        }
+      }
+      // do something with it.
+      assertThat(count, greaterThanOrEqualTo(0));
+    }
+    assertTrace("SCAN");
+  }
+
   @Test
   public void testExistsList() {
     CompletableFuture
       .allOf(
         table.exists(Arrays.asList(new Get(Bytes.toBytes(0)))).toArray(new CompletableFuture[0]))
       .join();
-    assertTrace("BATCH", hasAttributes(
-      containsEntryWithStringValuesOf("db.hbase.container_operations", "GET")));
+    assertTrace("BATCH",
+      hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET")));
   }

   @Test
   public void testExistsAll() {
     table.existsAll(Arrays.asList(new Get(Bytes.toBytes(0)))).join();
-    assertTrace("BATCH", hasAttributes(
-      containsEntryWithStringValuesOf("db.hbase.container_operations", "GET")));
+    assertTrace("BATCH",
+      hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET")));
   }

   @Test
   public void testGetList() {
     CompletableFuture
-      .allOf(table.get(Arrays.asList(new Get(Bytes.toBytes(0)))).toArray(new CompletableFuture[0]))
+      .allOf(
+        table.get(Arrays.asList(new Get(Bytes.toBytes(0)))).toArray(new CompletableFuture[0]))
       .join();
-    assertTrace("BATCH", hasAttributes(
-      containsEntryWithStringValuesOf("db.hbase.container_operations", "GET")));
+    assertTrace("BATCH",
+      hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET")));
   }

   @Test
   public void testGetAll() {
     table.getAll(Arrays.asList(new Get(Bytes.toBytes(0)))).join();
-    assertTrace("BATCH", hasAttributes(
-      containsEntryWithStringValuesOf("db.hbase.container_operations", "GET")));
+    assertTrace("BATCH",
+      hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET")));
   }

   @Test
@@ -493,49 +545,47 @@ public class TestAsyncTableTracing {
       .allOf(table.put(Arrays.asList(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"),
         Bytes.toBytes("cq"), Bytes.toBytes("v")))).toArray(new CompletableFuture[0]))
       .join();
-    assertTrace("BATCH", hasAttributes(
-      containsEntryWithStringValuesOf("db.hbase.container_operations", "PUT")));
+    assertTrace("BATCH",
+      hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "PUT")));
   }

   @Test
   public void testPutAll() {
     table.putAll(Arrays.asList(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"),
       Bytes.toBytes("cq"), Bytes.toBytes("v")))).join();
-    assertTrace("BATCH", hasAttributes(
-      containsEntryWithStringValuesOf("db.hbase.container_operations", "PUT")));
+    assertTrace("BATCH",
+      hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "PUT")));
   }

   @Test
   public void testDeleteList() {
-    CompletableFuture
-      .allOf(
+    CompletableFuture.allOf(
       table.delete(Arrays.asList(new Delete(Bytes.toBytes(0)))).toArray(new CompletableFuture[0]))
       .join();
-    assertTrace("BATCH", hasAttributes(
-      containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE")));
+    assertTrace("BATCH",
+      hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE")));
   }

   @Test
   public void testDeleteAll() {
     table.deleteAll(Arrays.asList(new Delete(Bytes.toBytes(0)))).join();
-    assertTrace("BATCH", hasAttributes(
-      containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE")));
+    assertTrace("BATCH",
+      hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE")));
   }

   @Test
   public void testBatch() {
-    CompletableFuture
-      .allOf(
+    CompletableFuture.allOf(
       table.batch(Arrays.asList(new Delete(Bytes.toBytes(0)))).toArray(new CompletableFuture[0]))
       .join();
-    assertTrace("BATCH", hasAttributes(
-      containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE")));
+    assertTrace("BATCH",
+      hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE")));
   }

   @Test
   public void testBatchAll() {
     table.batchAll(Arrays.asList(new Delete(Bytes.toBytes(0)))).join();
-    assertTrace("BATCH", hasAttributes(
-      containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE")));
+    assertTrace("BATCH",
+      hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE")));
   }
 }
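The testing strategy above relies on OpenTelemetryRule: it installs an in-memory SDK for the test, and assertTrace waits for the finished span to appear in traceRule.getSpans() and then matches on it. A self-contained sketch of that capture-and-assert loop, assuming opentelemetry-sdk-testing, hamcrest, and JUnit 4 on the classpath; the hasName matcher below is a simplified local version of the one in SpanDataMatchers, and the span name is illustrative:

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasItem;

import io.opentelemetry.api.trace.Span;
import io.opentelemetry.sdk.testing.junit4.OpenTelemetryRule;
import io.opentelemetry.sdk.trace.data.SpanData;
import org.hamcrest.FeatureMatcher;
import org.hamcrest.Matcher;
import org.junit.Rule;
import org.junit.Test;

public class SpanCaptureSketchTest {
  @Rule
  public OpenTelemetryRule traceRule = OpenTelemetryRule.create();

  // Simplified local hasName, in the spirit of SpanDataMatchers.hasName.
  private static Matcher<SpanData> hasName(Matcher<String> matcher) {
    return new FeatureMatcher<SpanData, String>(matcher, "SpanData with name", "name") {
      @Override
      protected String featureValueOf(SpanData item) {
        return item.getName();
      }
    };
  }

  @Test
  public void emitsOneSpan() {
    // Spans created through the rule's SDK are captured in memory once ended.
    Span span =
      traceRule.getOpenTelemetry().getTracer("test").spanBuilder("SCAN demo").startSpan();
    span.end();
    assertThat(traceRule.getSpans(), hasItem(hasName(equalTo("SCAN demo"))));
  }
}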
@ -17,15 +17,19 @@
|
|||
*/
|
||||
package org.apache.hadoop.hbase.client.trace.hamcrest;
|
||||
|
||||
import static org.apache.hadoop.hbase.client.trace.hamcrest.AttributesMatchers.containsEntry;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
import io.opentelemetry.api.common.Attributes;
|
||||
import io.opentelemetry.api.trace.SpanKind;
|
||||
import io.opentelemetry.api.trace.StatusCode;
|
||||
import io.opentelemetry.sdk.trace.data.EventData;
|
||||
import io.opentelemetry.sdk.trace.data.SpanData;
|
||||
 import io.opentelemetry.sdk.trace.data.StatusData;
+import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
 import java.time.Duration;
+import java.util.Objects;
 import org.hamcrest.Description;
 import org.hamcrest.FeatureMatcher;
 import org.hamcrest.Matcher;
@@ -36,21 +40,22 @@ import org.hamcrest.TypeSafeMatcher;
  */
 public final class SpanDataMatchers {
 
-  private SpanDataMatchers() { }
+  private SpanDataMatchers() {
+  }
 
   public static Matcher<SpanData> hasAttributes(Matcher<Attributes> matcher) {
-    return new FeatureMatcher<SpanData, Attributes>(
-      matcher, "SpanData having attributes that ", "attributes"
-    ) {
-      @Override protected Attributes featureValueOf(SpanData item) {
+    return new FeatureMatcher<SpanData, Attributes>(matcher, "SpanData having attributes that ",
+      "attributes") {
+      @Override
+      protected Attributes featureValueOf(SpanData item) {
         return item.getAttributes();
       }
     };
   }
 
   public static Matcher<SpanData> hasDuration(Matcher<Duration> matcher) {
-    return new FeatureMatcher<SpanData, Duration>(
-      matcher, "SpanData having duration that ", "duration") {
+    return new FeatureMatcher<SpanData, Duration>(matcher, "SpanData having duration that ",
+      "duration") {
       @Override
       protected Duration featureValueOf(SpanData item) {
         return Duration.ofNanos(item.getEndEpochNanos() - item.getStartEpochNanos());
@@ -60,28 +65,49 @@
 
   public static Matcher<SpanData> hasEnded() {
     return new TypeSafeMatcher<SpanData>() {
-      @Override protected boolean matchesSafely(SpanData item) {
+      @Override
+      protected boolean matchesSafely(SpanData item) {
         return item.hasEnded();
       }
-      @Override public void describeTo(Description description) {
+
+      @Override
+      public void describeTo(Description description) {
         description.appendText("SpanData that hasEnded");
       }
     };
   }
 
   public static Matcher<SpanData> hasEvents(Matcher<Iterable<? super EventData>> matcher) {
-    return new FeatureMatcher<SpanData, Iterable<? super EventData>>(
-      matcher, "SpanData having events that", "events") {
-      @Override protected Iterable<? super EventData> featureValueOf(SpanData item) {
+    return new FeatureMatcher<SpanData, Iterable<? super EventData>>(matcher,
+      "SpanData having events that", "events") {
+      @Override
+      protected Iterable<? super EventData> featureValueOf(SpanData item) {
         return item.getEvents();
       }
     };
   }
 
+  public static Matcher<SpanData> hasExceptionWithType(Matcher<? super String> matcher) {
+    return hasException(containsEntry(is(SemanticAttributes.EXCEPTION_TYPE), matcher));
+  }
+
+  public static Matcher<SpanData> hasException(Matcher<? super Attributes> matcher) {
+    return new FeatureMatcher<SpanData, Attributes>(matcher,
+      "SpanData having Exception with Attributes that", "exception attributes") {
+      @Override
+      protected Attributes featureValueOf(SpanData actual) {
+        return actual.getEvents().stream()
+          .filter(e -> Objects.equals(SemanticAttributes.EXCEPTION_EVENT_NAME, e.getName()))
+          .map(EventData::getAttributes).findFirst().orElse(null);
+      }
+    };
+  }
+
   public static Matcher<SpanData> hasKind(SpanKind kind) {
-    return new FeatureMatcher<SpanData, SpanKind>(
-      equalTo(kind), "SpanData with kind that", "SpanKind") {
-      @Override protected SpanKind featureValueOf(SpanData item) {
+    return new FeatureMatcher<SpanData, SpanKind>(equalTo(kind), "SpanData with kind that",
+      "SpanKind") {
+      @Override
+      protected SpanKind featureValueOf(SpanData item) {
        return item.getKind();
       }
     };
@@ -93,7 +119,8 @@ public final class SpanDataMatchers {
 
   public static Matcher<SpanData> hasName(Matcher<String> matcher) {
     return new FeatureMatcher<SpanData, String>(matcher, "SpanKind with a name that", "name") {
-      @Override protected String featureValueOf(SpanData item) {
+      @Override
+      protected String featureValueOf(SpanData item) {
         return item.getName();
       }
     };
@@ -109,9 +136,9 @@ public final class SpanDataMatchers {
 
   public static Matcher<SpanData> hasParentSpanId(Matcher<String> matcher) {
     return new FeatureMatcher<SpanData, String>(matcher, "SpanKind with a parentSpanId that",
-      "parentSpanId"
-    ) {
-      @Override protected String featureValueOf(SpanData item) {
+      "parentSpanId") {
+      @Override
+      protected String featureValueOf(SpanData item) {
         return item.getParentSpanId();
       }
     };
@@ -120,13 +147,15 @@ public final class SpanDataMatchers {
   public static Matcher<SpanData> hasStatusWithCode(StatusCode statusCode) {
     final Matcher<StatusCode> matcher = is(equalTo(statusCode));
     return new TypeSafeMatcher<SpanData>() {
-      @Override protected boolean matchesSafely(SpanData item) {
+      @Override
+      protected boolean matchesSafely(SpanData item) {
         final StatusData statusData = item.getStatus();
-        return statusData != null
-          && statusData.getStatusCode() != null
+        return statusData != null && statusData.getStatusCode() != null
           && matcher.matches(statusData.getStatusCode());
       }
-      @Override public void describeTo(Description description) {
+
+      @Override
+      public void describeTo(Description description) {
         description.appendText("SpanData with StatusCode that ").appendDescriptionOf(matcher);
       }
     };
@@ -137,9 +166,10 @@ public final class SpanDataMatchers {
   }
 
   public static Matcher<SpanData> hasTraceId(Matcher<String> matcher) {
-    return new FeatureMatcher<SpanData, String>(
-      matcher, "SpanData with a traceId that ", "traceId") {
-      @Override protected String featureValueOf(SpanData item) {
+    return new FeatureMatcher<SpanData, String>(matcher, "SpanData with a traceId that ",
+      "traceId") {
+      @Override
+      protected String featureValueOf(SpanData item) {
         return item.getTraceId();
       }
     };
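For orientation, a minimal sketch (an editor's example, not part of this commit) of how these matchers compose in a test assertion. The span name is hypothetical, and `spans` is assumed to be the SpanData list captured by an in-memory exporter such as the OpenTelemetryClassRule used by the tests below.

  // Given: List<SpanData> spans, captured from an in-memory exporter.
  // Assert that a span with the expected name completed successfully.
  assertThat(spans, hasItem(allOf(
    hasName("SCAN default:async"),      // hypothetical span name
    hasStatusWithCode(StatusCode.OK),   // io.opentelemetry.api.trace.StatusCode
    hasEnded())));
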
@@ -1,5 +1,4 @@
-/**
- *
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -20,7 +19,6 @@ package org.apache.hadoop.hbase;
 
 import java.util.Arrays;
 import java.util.List;
-
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -28,10 +26,9 @@ import org.apache.yetus.audience.InterfaceAudience;
 /**
  * Options for starting up a mini cluster (including an hbase, dfs and zookeeper clusters) in test.
  * The options include HDFS options to build mini dfs cluster, Zookeeper options to build mini zk
- * cluster, and mostly HBase options to build mini hbase cluster.
- *
- * To create an object, use a {@link Builder}.
- * Example usage:
+ * cluster, and mostly HBase options to build mini hbase cluster. To create an object, use a
+ * {@link Builder}. Example usage:
  * <pre>
  *   StartMiniClusterOption option = StartMiniClusterOption.builder()
  *       .numMasters(3).rsClass(MyRegionServer.class).createWALDir(true).build();
@@ -42,8 +39,8 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Public
 public final class StartMiniClusterOption {
   /**
-   * Number of masters to start up. We'll start this many hbase masters. If numMasters > 1, you
-   * can find the active/primary master with {@link MiniHBaseCluster#getMaster()}.
+   * Number of masters to start up. We'll start this many hbase masters. If numMasters > 1, you can
+   * find the active/primary master with {@link MiniHBaseCluster#getMaster()}.
    */
   private final int numMasters;
 
@@ -60,9 +57,8 @@ public final class StartMiniClusterOption {
   private final Class<? extends HMaster> masterClass;
 
   /**
-   * Number of region servers to start up.
-   * If this value is > 1, then make sure config "hbase.regionserver.info.port" is -1
-   * (i.e. no ui per regionserver) otherwise bind errors.
+   * Number of region servers to start up. If this value is > 1, then make sure config
+   * "hbase.regionserver.info.port" is -1 (i.e. no ui per regionserver) otherwise bind errors.
    */
   private final int numRegionServers;
   /**
@@ -172,9 +168,9 @@ public final class StartMiniClusterOption {
   public String toString() {
     return "StartMiniClusterOption{" + "numMasters=" + numMasters + ", masterClass=" + masterClass
       + ", numRegionServers=" + numRegionServers + ", rsPorts=" + StringUtils.join(rsPorts)
-      + ", rsClass=" + rsClass + ", numDataNodes=" + numDataNodes
-      + ", dataNodeHosts=" + Arrays.toString(dataNodeHosts) + ", numZkServers=" + numZkServers
-      + ", createRootDir=" + createRootDir + ", createWALDir=" + createWALDir + '}';
+      + ", rsClass=" + rsClass + ", numDataNodes=" + numDataNodes + ", dataNodeHosts="
+      + Arrays.toString(dataNodeHosts) + ", numZkServers=" + numZkServers + ", createRootDir="
+      + createRootDir + ", createWALDir=" + createWALDir + '}';
   }
 
   /**
@@ -185,10 +181,9 @@ public final class StartMiniClusterOption {
   }
 
   /**
-   * Builder pattern for creating an {@link StartMiniClusterOption}.
-   *
-   * The default values of its fields should be considered public and constant. Changing the default
-   * values may cause other tests fail.
+   * Builder pattern for creating an {@link StartMiniClusterOption}. The default values of its
+   * fields should be considered public and constant. Changing the default values may cause other
+   * tests fail.
    */
   public static final class Builder {
     private int numMasters = 1;
@@ -260,6 +255,10 @@ public final class StartMiniClusterOption {
       return this;
     }
 
+    public Builder numWorkers(int numWorkers) {
+      return numDataNodes(numWorkers).numRegionServers(numWorkers);
+    }
+
     public Builder createRootDir(boolean createRootDir) {
       this.createRootDir = createRootDir;
       return this;
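A quick sketch of what the new numWorkers option buys callers: a single value sizes both the region servers and the backing data nodes, which is how the scan tests below configure their mini cluster.

  // three region servers backed by three HDFS data nodes
  StartMiniClusterOption option = StartMiniClusterOption.builder().numWorkers(3).build();
  // equivalent to: builder().numDataNodes(3).numRegionServers(3).build()
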
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -17,30 +17,91 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.endsWith;
+import static org.hamcrest.Matchers.hasItem;
+import static org.hamcrest.Matchers.hasProperty;
+import static org.hamcrest.Matchers.isA;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.fail;
 
+import io.opentelemetry.sdk.trace.data.SpanData;
 import java.io.IOException;
 import java.io.UncheckedIOException;
 import java.util.Arrays;
 import java.util.List;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.TimeUnit;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ConnectionRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MatcherPredicate;
+import org.apache.hadoop.hbase.MiniClusterRule;
+import org.apache.hadoop.hbase.StartMiniClusterOption;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
+import org.apache.hadoop.hbase.trace.OpenTelemetryClassRule;
+import org.apache.hadoop.hbase.trace.OpenTelemetryTestRule;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.Pair;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.hamcrest.Matcher;
+import org.junit.ClassRule;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExternalResource;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestName;
+import org.junit.rules.TestRule;
 
 public abstract class AbstractTestAsyncTableScan {
 
-  protected static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  protected static final OpenTelemetryClassRule otelClassRule = OpenTelemetryClassRule.create();
+  protected static final MiniClusterRule miniClusterRule = MiniClusterRule.newBuilder()
+    .setMiniClusterOption(StartMiniClusterOption.builder().numWorkers(3).build()).build();
+
+  protected static final ConnectionRule connectionRule =
+    ConnectionRule.createAsyncConnectionRule(miniClusterRule::createAsyncConnection);
+
+  private static final class Setup extends ExternalResource {
+    @Override
+    protected void before() throws Throwable {
+      final HBaseTestingUtility testingUtil = miniClusterRule.getTestingUtility();
+      final AsyncConnection conn = connectionRule.getAsyncConnection();
+
+      byte[][] splitKeys = new byte[8][];
+      for (int i = 111; i < 999; i += 111) {
+        splitKeys[i / 111 - 1] = Bytes.toBytes(String.format("%03d", i));
+      }
+      testingUtil.createTable(TABLE_NAME, FAMILY, splitKeys);
+      testingUtil.waitTableAvailable(TABLE_NAME);
+      conn.getTable(TABLE_NAME)
+        .putAll(IntStream.range(0, COUNT)
+          .mapToObj(i -> new Put(Bytes.toBytes(String.format("%03d", i)))
+            .addColumn(FAMILY, CQ1, Bytes.toBytes(i))
+            .addColumn(FAMILY, CQ2, Bytes.toBytes(i * i)))
+          .collect(Collectors.toList()))
+        .get();
+    }
+  }
+
+  @ClassRule
+  public static final TestRule classRule = RuleChain.outerRule(otelClassRule)
+    .around(miniClusterRule).around(connectionRule).around(new Setup());
+
+  @Rule
+  public final OpenTelemetryTestRule otelTestRule = new OpenTelemetryTestRule(otelClassRule);
+
+  @Rule
+  public final TestName testName = new TestName();
 
   protected static TableName TABLE_NAME = TableName.valueOf("async");
 
@@ -52,53 +113,29 @@ public abstract class AbstractTestAsyncTableScan {
 
   protected static int COUNT = 1000;
 
-  protected static AsyncConnection ASYNC_CONN;
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    TEST_UTIL.startMiniCluster(3);
-    byte[][] splitKeys = new byte[8][];
-    for (int i = 111; i < 999; i += 111) {
-      splitKeys[i / 111 - 1] = Bytes.toBytes(String.format("%03d", i));
-    }
-    TEST_UTIL.createTable(TABLE_NAME, FAMILY, splitKeys);
-    TEST_UTIL.waitTableAvailable(TABLE_NAME);
-    ASYNC_CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get();
-    ASYNC_CONN.getTable(TABLE_NAME).putAll(IntStream.range(0, COUNT)
-      .mapToObj(i -> new Put(Bytes.toBytes(String.format("%03d", i)))
-        .addColumn(FAMILY, CQ1, Bytes.toBytes(i)).addColumn(FAMILY, CQ2, Bytes.toBytes(i * i)))
-      .collect(Collectors.toList())).get();
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    ASYNC_CONN.close();
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
-  protected static Scan createNormalScan() {
+  private static Scan createNormalScan() {
     return new Scan();
   }
 
-  protected static Scan createBatchScan() {
+  private static Scan createBatchScan() {
     return new Scan().setBatch(1);
   }
 
   // set a small result size for testing flow control
-  protected static Scan createSmallResultSizeScan() {
+  private static Scan createSmallResultSizeScan() {
     return new Scan().setMaxResultSize(1);
   }
 
-  protected static Scan createBatchSmallResultSizeScan() {
+  private static Scan createBatchSmallResultSizeScan() {
     return new Scan().setBatch(1).setMaxResultSize(1);
   }
 
-  protected static AsyncTable<?> getRawTable() {
-    return ASYNC_CONN.getTable(TABLE_NAME);
+  private static AsyncTable<?> getRawTable() {
+    return connectionRule.getAsyncConnection().getTable(TABLE_NAME);
   }
 
-  protected static AsyncTable<?> getTable() {
-    return ASYNC_CONN.getTable(TABLE_NAME, ForkJoinPool.commonPool());
+  private static AsyncTable<?> getTable() {
+    return connectionRule.getAsyncConnection().getTable(TABLE_NAME, ForkJoinPool.commonPool());
   }
 
   private static List<Pair<String, Supplier<Scan>>> getScanCreator() {
@@ -132,8 +169,18 @@ public abstract class AbstractTestAsyncTableScan {
 
   protected abstract List<Result> doScan(Scan scan, int closeAfter) throws Exception;
 
+  /**
+   * Used by implementation classes to assert the correctness of spans produced under test.
+   */
+  protected abstract void assertTraceContinuity();
+
+  /**
+   * Used by implementation classes to assert the correctness of spans having errors.
+   */
+  protected abstract void assertTraceError(final Matcher<String> exceptionTypeNameMatcher);
+
   protected final List<Result> convertFromBatchResult(List<Result> results) {
-    assertTrue(results.size() % 2 == 0);
+    assertEquals(0, results.size() % 2);
     return IntStream.range(0, results.size() / 2).mapToObj(i -> {
       try {
         return Result
@@ -144,15 +191,21 @@ public abstract class AbstractTestAsyncTableScan {
     }).collect(Collectors.toList());
   }
 
+  protected static void waitForSpan(final Matcher<SpanData> parentSpanMatcher) {
+    final Configuration conf = miniClusterRule.getTestingUtility().getConfiguration();
+    Waiter.waitFor(conf, TimeUnit.SECONDS.toMillis(5), new MatcherPredicate<>(
+      "Span for test failed to complete.", otelClassRule::getSpans, hasItem(parentSpanMatcher)));
+  }
+
   @Test
   public void testScanAll() throws Exception {
     List<Result> results = doScan(createScan(), -1);
     // make sure all scanners are closed at RS side
-    TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream().map(t -> t.getRegionServer())
-      .forEach(
-        rs -> assertEquals(
-          "The scanner count of " + rs.getServerName() + " is " +
-            rs.getRSRpcServices().getScannersCount(),
+    miniClusterRule.getTestingUtility().getHBaseCluster().getRegionServerThreads().stream()
+      .map(JVMClusterUtil.RegionServerThread::getRegionServer)
+      .forEach(rs -> assertEquals(
+        "The scanner count of " + rs.getServerName() + " is "
+          + rs.getRSRpcServices().getScannersCount(),
         0, rs.getRSRpcServices().getScannersCount()));
     assertEquals(COUNT, results.size());
     IntStream.range(0, COUNT).forEach(i -> {
@@ -170,37 +223,54 @@ public abstract class AbstractTestAsyncTableScan {
 
   @Test
   public void testReversedScanAll() throws Exception {
-    List<Result> results = doScan(createScan().setReversed(true), -1);
+    List<Result> results =
+      TraceUtil.trace(() -> doScan(createScan().setReversed(true), -1), testName.getMethodName());
     assertEquals(COUNT, results.size());
     IntStream.range(0, COUNT).forEach(i -> assertResultEquals(results.get(i), COUNT - i - 1));
+    assertTraceContinuity();
   }
 
   @Test
   public void testScanNoStopKey() throws Exception {
     int start = 345;
-    List<Result> results =
-      doScan(createScan().withStartRow(Bytes.toBytes(String.format("%03d", start))), -1);
+    List<Result> results = TraceUtil.trace(
+      () -> doScan(createScan().withStartRow(Bytes.toBytes(String.format("%03d", start))), -1),
+      testName.getMethodName());
     assertEquals(COUNT - start, results.size());
     IntStream.range(0, COUNT - start).forEach(i -> assertResultEquals(results.get(i), start + i));
+    assertTraceContinuity();
   }
 
   @Test
   public void testReverseScanNoStopKey() throws Exception {
     int start = 765;
-    List<Result> results = doScan(
-      createScan().withStartRow(Bytes.toBytes(String.format("%03d", start))).setReversed(true), -1);
+    final Scan scan =
+      createScan().withStartRow(Bytes.toBytes(String.format("%03d", start))).setReversed(true);
+    List<Result> results = TraceUtil.trace(() -> doScan(scan, -1), testName.getMethodName());
     assertEquals(start + 1, results.size());
     IntStream.range(0, start + 1).forEach(i -> assertResultEquals(results.get(i), start - i));
+    assertTraceContinuity();
   }
 
   @Test
-  public void testScanWrongColumnFamily() throws Exception {
-    try {
-      doScan(createScan().addFamily(Bytes.toBytes("WrongColumnFamily")), -1);
-    } catch (Exception e) {
-      assertTrue(e instanceof NoSuchColumnFamilyException ||
-        e.getCause() instanceof NoSuchColumnFamilyException);
-    }
+  public void testScanWrongColumnFamily() {
+    final Exception e = assertThrows(Exception.class,
+      () -> TraceUtil.trace(
+        () -> doScan(createScan().addFamily(Bytes.toBytes("WrongColumnFamily")), -1),
+        testName.getMethodName()));
+    // hamcrest generic enforcement for `anyOf` is a pain; skip it
+    // but -- don't we always unwrap ExecutionExceptions -- bug?
+    if (e instanceof NoSuchColumnFamilyException) {
+      final NoSuchColumnFamilyException ex = (NoSuchColumnFamilyException) e;
+      assertThat(ex, isA(NoSuchColumnFamilyException.class));
+    } else if (e instanceof ExecutionException) {
+      final ExecutionException ex = (ExecutionException) e;
+      assertThat(ex, allOf(isA(ExecutionException.class),
+        hasProperty("cause", isA(NoSuchColumnFamilyException.class))));
+    } else {
+      fail("Found unexpected Exception " + e);
+    }
+    assertTraceError(endsWith(NoSuchColumnFamilyException.class.getName()));
   }
 
   private void testScan(int start, boolean startInclusive, int stop, boolean stopInclusive,
@@ -232,8 +302,8 @@ public abstract class AbstractTestAsyncTableScan {
 
   private void testReversedScan(int start, boolean startInclusive, int stop, boolean stopInclusive,
     int limit) throws Exception {
-    Scan scan =
-      createScan().withStartRow(Bytes.toBytes(String.format("%03d", start)), startInclusive)
+    Scan scan = createScan()
+      .withStartRow(Bytes.toBytes(String.format("%03d", start)), startInclusive)
       .withStopRow(Bytes.toBytes(String.format("%03d", stop)), stopInclusive).setReversed(true);
     if (limit > 0) {
       scan.setLimit(limit);
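The traced test methods above all share one shape, condensed here as a sketch (the span/method names are illustrative): wrap the scan in a parent span named after the test, wait for the asynchronous exporter to deliver that span, then hand off to the subclass for structural assertions.

  List<Result> results =
    TraceUtil.trace(() -> doScan(createScan(), -1), testName.getMethodName());
  // span delivery is asynchronous, so block until the parent span is visible
  waitForSpan(allOf(hasName(testName.getMethodName()), hasEnded()));
  assertTraceContinuity(); // implementation-specific parent/child checks
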
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+/**
+ * Advise the scanning infrastructure to collect up to {@code limit} results.
+ */
+class LimitedScanResultConsumer extends SimpleScanResultConsumerImpl {
+
+  private final int limit;
+
+  public LimitedScanResultConsumer(int limit) {
+    this.limit = limit;
+  }
+
+  @Override
+  public synchronized boolean onNext(Result result) {
+    return super.onNext(result) && results.size() < limit;
+  }
+}
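A usage sketch: returning false from onNext is the ScanResultConsumer protocol for terminating a scan early, so a consumer built with limit 10 asks the scan to stop once ten results have been buffered.

  LimitedScanResultConsumer consumer = new LimitedScanResultConsumer(10);
  table.scan(new Scan(), consumer);           // `table` is an AsyncTable<ScanResultConsumer>
  List<Result> firstTen = consumer.getAll();  // blocks until the scan winds down
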
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -17,59 +17,15 @@
  */
 package org.apache.hadoop.hbase.client;
 
-import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-
-import java.util.ArrayList;
 import java.util.List;
-
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 
-final class SimpleScanResultConsumer implements ScanResultConsumer {
+/**
+ * A simplistic {@link ScanResultConsumer} for use in tests.
+ */
+public interface SimpleScanResultConsumer extends ScanResultConsumer {
 
-  private ScanMetrics scanMetrics;
+  List<Result> getAll() throws Exception;
 
-  private final List<Result> results = new ArrayList<>();
-
-  private Throwable error;
-
-  private boolean finished = false;
-
-  @Override
-  public void onScanMetricsCreated(ScanMetrics scanMetrics) {
-    this.scanMetrics = scanMetrics;
-  }
-
-  @Override
-  public synchronized boolean onNext(Result result) {
-    results.add(result);
-    return true;
-  }
-
-  @Override
-  public synchronized void onError(Throwable error) {
-    this.error = error;
-    finished = true;
-    notifyAll();
-  }
-
-  @Override
-  public synchronized void onComplete() {
-    finished = true;
-    notifyAll();
-  }
-
-  public synchronized List<Result> getAll() throws Exception {
-    while (!finished) {
-      wait();
-    }
-    if (error != null) {
-      Throwables.propagateIfPossible(error, Exception.class);
-      throw new Exception(error);
-    }
-    return results;
-  }
-
-  public ScanMetrics getScanMetrics() {
-    return scanMetrics;
-  }
+  ScanMetrics getScanMetrics();
 }
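Extracting this interface is what lets the tests interpose a tracing decorator between the table and the real consumer. A sketch, assuming the TracedScanResultConsumer introduced elsewhere in this change implements this interface:

  SimpleScanResultConsumer consumer =
    new TracedScanResultConsumer(new SimpleScanResultConsumerImpl());
  table.scan(scan, consumer);
  List<Result> results = consumer.getAll();
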
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+
+import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
+
+class SimpleScanResultConsumerImpl implements SimpleScanResultConsumer {
+
+  private ScanMetrics scanMetrics;
+
+  protected final List<Result> results = new ArrayList<>();
+
+  private Throwable error;
+
+  private boolean finished = false;
+
+  @Override
+  public void onScanMetricsCreated(ScanMetrics scanMetrics) {
+    this.scanMetrics = scanMetrics;
+  }
+
+  @Override
+  public synchronized boolean onNext(Result result) {
+    results.add(result);
+    return true;
+  }
+
+  @Override
+  public synchronized void onError(Throwable error) {
+    this.error = error;
+    finished = true;
+    notifyAll();
+  }
+
+  @Override
+  public synchronized void onComplete() {
+    finished = true;
+    notifyAll();
+  }
+
+  @Override
+  public synchronized List<Result> getAll() throws Exception {
+    while (!finished) {
+      wait();
+    }
+    if (error != null) {
+      Throwables.propagateIfPossible(error, Exception.class);
+      throw new Exception(error);
+    }
+    return results;
+  }
+
+  @Override
+  public ScanMetrics getScanMetrics() {
+    return scanMetrics;
+  }
+}
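The implementation above is a plain monitor rendezvous: onNext, onError and onComplete run on the client's callback threads and fill in state, while getAll() parks the calling thread on the same monitor until `finished` flips. A calling-side sketch:

  SimpleScanResultConsumer consumer = new SimpleScanResultConsumerImpl();
  asyncTable.scan(new Scan(), consumer);
  List<Result> all = consumer.getAll(); // rethrows a scan failure as an Exception
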
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -17,24 +17,41 @@
  */
 package org.apache.hadoop.hbase.client;
 
-import java.util.ArrayList;
+import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasEnded;
+import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasExceptionWithType;
+import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasName;
+import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasParentSpanId;
+import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasStatusWithCode;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.hasItem;
+import static org.hamcrest.Matchers.startsWith;
+
+import io.opentelemetry.api.trace.StatusCode;
+import io.opentelemetry.sdk.trace.data.SpanData;
 import java.util.List;
+import java.util.Objects;
 import java.util.concurrent.ForkJoinPool;
 import java.util.function.Supplier;
+import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.client.trace.StringTraceRenderer;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.hamcrest.Matcher;
 import org.junit.ClassRule;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameter;
 import org.junit.runners.Parameterized.Parameters;
-
-import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @RunWith(Parameterized.class)
 @Category({ LargeTests.class, ClientTests.class })
 public class TestAsyncTableScan extends AbstractTestAsyncTableScan {
+  private static final Logger logger = LoggerFactory.getLogger(TestAsyncTableScan.class);
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
@@ -59,7 +76,7 @@ public class TestAsyncTableScan extends AbstractTestAsyncTableScan {
   @Override
   protected List<Result> doScan(Scan scan, int closeAfter) throws Exception {
     AsyncTable<ScanResultConsumer> table =
-      ASYNC_CONN.getTable(TABLE_NAME, ForkJoinPool.commonPool());
+      connectionRule.getAsyncConnection().getTable(TABLE_NAME, ForkJoinPool.commonPool());
     List<Result> results;
     if (closeAfter > 0) {
       // these tests batch settings with the sample data result in each result being
@@ -68,11 +85,13 @@ public class TestAsyncTableScan extends AbstractTestAsyncTableScan {
       if (scan.getBatch() > 0) {
         closeAfter = closeAfter * 2;
       }
-      LimitedScanResultConsumer consumer = new LimitedScanResultConsumer(closeAfter);
+      TracedScanResultConsumer consumer =
+        new TracedScanResultConsumer(new LimitedScanResultConsumer(closeAfter));
       table.scan(scan, consumer);
       results = consumer.getAll();
     } else {
-      SimpleScanResultConsumer consumer = new SimpleScanResultConsumer();
+      TracedScanResultConsumer consumer =
+        new TracedScanResultConsumer(new SimpleScanResultConsumerImpl());
       table.scan(scan, consumer);
       results = consumer.getAll();
     }
@@ -82,49 +101,77 @@ public class TestAsyncTableScan extends AbstractTestAsyncTableScan {
     return results;
   }
 
-  private static class LimitedScanResultConsumer implements ScanResultConsumer {
-
-    private final int limit;
-
-    public LimitedScanResultConsumer(int limit) {
-      this.limit = limit;
-    }
-
-    private final List<Result> results = new ArrayList<>();
-
-    private Throwable error;
-
-    private boolean finished = false;
-
-    @Override
-    public synchronized boolean onNext(Result result) {
-      results.add(result);
-      return results.size() < limit;
-    }
-
-    @Override
-    public synchronized void onError(Throwable error) {
-      this.error = error;
-      finished = true;
-      notifyAll();
-    }
-
-    @Override
-    public synchronized void onComplete() {
-      finished = true;
-      notifyAll();
-    }
-
-    public synchronized List<Result> getAll() throws Exception {
-      while (!finished) {
-        wait();
-      }
-      if (error != null) {
-        Throwables.propagateIfPossible(error, Exception.class);
-        throw new Exception(error);
-      }
-      return results;
-    }
-  }
+  @Override
+  protected void assertTraceContinuity() {
+    final String parentSpanName = testName.getMethodName();
+    final Matcher<SpanData> parentSpanMatcher =
+      allOf(hasName(parentSpanName), hasStatusWithCode(StatusCode.OK), hasEnded());
+    waitForSpan(parentSpanMatcher);
+
+    final List<SpanData> spans =
+      otelClassRule.getSpans().stream().filter(Objects::nonNull).collect(Collectors.toList());
+    if (logger.isDebugEnabled()) {
+      StringTraceRenderer stringTraceRenderer = new StringTraceRenderer(spans);
+      stringTraceRenderer.render(logger::debug);
+    }
+
+    final String parentSpanId = spans.stream().filter(parentSpanMatcher::matches)
+      .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new);
+
+    final Matcher<SpanData> scanOperationSpanMatcher =
+      allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())),
+        hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.OK), hasEnded());
+    assertThat(spans, hasItem(scanOperationSpanMatcher));
+    final String scanOperationSpanId = spans.stream().filter(scanOperationSpanMatcher::matches)
+      .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new);
+
+    final Matcher<SpanData> onScanMetricsCreatedMatcher =
+      hasName("TracedScanResultConsumer#onScanMetricsCreated");
+    assertThat(spans, hasItem(onScanMetricsCreatedMatcher));
+    spans.stream().filter(onScanMetricsCreatedMatcher::matches).forEach(span -> assertThat(span,
+      allOf(onScanMetricsCreatedMatcher, hasParentSpanId(scanOperationSpanId), hasEnded())));
+
+    final Matcher<SpanData> onNextMatcher = hasName("TracedScanResultConsumer#onNext");
+    assertThat(spans, hasItem(onNextMatcher));
+    spans.stream().filter(onNextMatcher::matches)
+      .forEach(span -> assertThat(span, allOf(onNextMatcher, hasParentSpanId(scanOperationSpanId),
+        hasStatusWithCode(StatusCode.OK), hasEnded())));
+
+    final Matcher<SpanData> onCompleteMatcher = hasName("TracedScanResultConsumer#onComplete");
+    assertThat(spans, hasItem(onCompleteMatcher));
+    spans.stream().filter(onCompleteMatcher::matches)
+      .forEach(span -> assertThat(span, allOf(onCompleteMatcher,
+        hasParentSpanId(scanOperationSpanId), hasStatusWithCode(StatusCode.OK), hasEnded())));
+  }
+
+  @Override
+  protected void assertTraceError(Matcher<String> exceptionTypeNameMatcher) {
+    final String parentSpanName = testName.getMethodName();
+    final Matcher<SpanData> parentSpanMatcher = allOf(hasName(parentSpanName), hasEnded());
+    waitForSpan(parentSpanMatcher);
+
+    final List<SpanData> spans =
+      otelClassRule.getSpans().stream().filter(Objects::nonNull).collect(Collectors.toList());
+    if (logger.isDebugEnabled()) {
+      StringTraceRenderer stringTraceRenderer = new StringTraceRenderer(spans);
+      stringTraceRenderer.render(logger::debug);
+    }
+
+    final String parentSpanId = spans.stream().filter(parentSpanMatcher::matches)
+      .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new);
+
+    final Matcher<SpanData> scanOperationSpanMatcher =
+      allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())),
+        hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.ERROR),
+        hasExceptionWithType(exceptionTypeNameMatcher), hasEnded());
+    assertThat(spans, hasItem(scanOperationSpanMatcher));
+    final String scanOperationSpanId = spans.stream().filter(scanOperationSpanMatcher::matches)
+      .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new);
+
+    final Matcher<SpanData> onErrorMatcher = hasName("TracedScanResultConsumer#onError");
+    assertThat(spans, hasItem(onErrorMatcher));
+    spans.stream().filter(onErrorMatcher::matches)
+      .forEach(span -> assertThat(span, allOf(onErrorMatcher,
+        hasParentSpanId(scanOperationSpanId), hasStatusWithCode(StatusCode.OK), hasEnded())));
+  }
 }
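The continuity assertions above reduce to a single technique: resolve the matched parent span to its spanId, then require each child tier to carry that id as its parentSpanId. In miniature:

  final String parentId = spans.stream().filter(parentSpanMatcher::matches)
    .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new);
  assertThat(spans,
    hasItem(allOf(hasName("TracedScanResultConsumer#onNext"), hasParentSpanId(parentId))));
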
@@ -17,21 +17,40 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasEnded;
+import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasExceptionWithType;
+import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasName;
+import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasParentSpanId;
+import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasStatusWithCode;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.hasItem;
+import static org.hamcrest.Matchers.startsWith;
+
+import io.opentelemetry.api.trace.StatusCode;
+import io.opentelemetry.sdk.trace.data.SpanData;
 import java.util.List;
+import java.util.Objects;
 import java.util.function.Supplier;
+import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.client.trace.StringTraceRenderer;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.hamcrest.Matcher;
 import org.junit.ClassRule;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameter;
 import org.junit.runners.Parameterized.Parameters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @RunWith(Parameterized.class)
 @Category({ LargeTests.class, ClientTests.class })
 public class TestAsyncTableScanAll extends AbstractTestAsyncTableScan {
+  private static final Logger logger = LoggerFactory.getLogger(TestAsyncTableScanAll.class);
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
@@ -72,4 +91,50 @@ public class TestAsyncTableScanAll extends AbstractTestAsyncTableScan {
     }
     return results;
   }
+
+  @Override
+  protected void assertTraceContinuity() {
+    final String parentSpanName = testName.getMethodName();
+    final Matcher<SpanData> parentSpanMatcher =
+      allOf(hasName(parentSpanName), hasStatusWithCode(StatusCode.OK), hasEnded());
+    waitForSpan(parentSpanMatcher);
+
+    final List<SpanData> spans =
+      otelClassRule.getSpans().stream().filter(Objects::nonNull).collect(Collectors.toList());
+    if (logger.isDebugEnabled()) {
+      StringTraceRenderer stringTraceRenderer = new StringTraceRenderer(spans);
+      stringTraceRenderer.render(logger::debug);
+    }
+
+    final String parentSpanId = spans.stream().filter(parentSpanMatcher::matches)
+      .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new);
+
+    final Matcher<SpanData> scanOperationSpanMatcher =
+      allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())),
+        hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.OK), hasEnded());
+    assertThat(spans, hasItem(scanOperationSpanMatcher));
+  }
+
+  @Override
+  protected void assertTraceError(Matcher<String> exceptionTypeNameMatcher) {
+    final String parentSpanName = testName.getMethodName();
+    final Matcher<SpanData> parentSpanMatcher = allOf(hasName(parentSpanName), hasEnded());
+    waitForSpan(parentSpanMatcher);
+
+    final List<SpanData> spans =
+      otelClassRule.getSpans().stream().filter(Objects::nonNull).collect(Collectors.toList());
+    if (logger.isDebugEnabled()) {
+      StringTraceRenderer stringTraceRenderer = new StringTraceRenderer(spans);
+      stringTraceRenderer.render(logger::debug);
+    }
+
+    final String parentSpanId = spans.stream().filter(parentSpanMatcher::matches)
+      .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new);
+
+    final Matcher<SpanData> scanOperationSpanMatcher =
+      allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())),
+        hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.ERROR),
+        hasExceptionWithType(exceptionTypeNameMatcher), hasEnded());
+    assertThat(spans, hasItem(scanOperationSpanMatcher));
+  }
 }
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -123,7 +123,7 @@ public class TestAsyncTableScanMetrics {
 
   private static Pair<List<Result>, ScanMetrics> doScanWithAsyncTableScan(Scan scan)
     throws Exception {
-    SimpleScanResultConsumer consumer = new SimpleScanResultConsumer();
+    SimpleScanResultConsumerImpl consumer = new SimpleScanResultConsumerImpl();
     CONN.getTable(TABLE_NAME, ForkJoinPool.commonPool()).scan(scan, consumer);
     return Pair.newPair(consumer.getAll(), consumer.getScanMetrics());
   }
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -17,23 +17,42 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasEnded;
+import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasExceptionWithType;
+import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasName;
+import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasParentSpanId;
+import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasStatusWithCode;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.hasItem;
+import static org.hamcrest.Matchers.startsWith;
+
+import io.opentelemetry.api.trace.StatusCode;
+import io.opentelemetry.sdk.trace.data.SpanData;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Objects;
 import java.util.concurrent.ForkJoinPool;
 import java.util.function.Supplier;
+import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.client.trace.StringTraceRenderer;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.hamcrest.Matcher;
 import org.junit.ClassRule;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameter;
 import org.junit.runners.Parameterized.Parameters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @RunWith(Parameterized.class)
 @Category({ LargeTests.class, ClientTests.class })
 public class TestAsyncTableScanner extends AbstractTestAsyncTableScan {
+  private static final Logger logger = LoggerFactory.getLogger(TestAsyncTableScanner.class);
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
@@ -63,7 +82,8 @@ public class TestAsyncTableScanner extends AbstractTestAsyncTableScan {
 
   @Override
   protected List<Result> doScan(Scan scan, int closeAfter) throws Exception {
-    AsyncTable<?> table = ASYNC_CONN.getTable(TABLE_NAME, ForkJoinPool.commonPool());
+    AsyncTable<?> table =
+      connectionRule.getAsyncConnection().getTable(TABLE_NAME, ForkJoinPool.commonPool());
     List<Result> results = new ArrayList<>();
     // these tests batch settings with the sample data result in each result being
     // split in two. so we must allow twice the expected results in order to reach
@@ -84,4 +104,49 @@ public class TestAsyncTableScanner extends AbstractTestAsyncTableScan {
     }
     return results;
   }
+
+  @Override
+  protected void assertTraceContinuity() {
+    final String parentSpanName = testName.getMethodName();
+    final Matcher<SpanData> parentSpanMatcher =
+      allOf(hasName(parentSpanName), hasStatusWithCode(StatusCode.OK), hasEnded());
+    waitForSpan(parentSpanMatcher);
+
+    final List<SpanData> spans =
+      otelClassRule.getSpans().stream().filter(Objects::nonNull).collect(Collectors.toList());
+    if (logger.isDebugEnabled()) {
+      StringTraceRenderer stringTraceRenderer = new StringTraceRenderer(spans);
+      stringTraceRenderer.render(logger::debug);
+    }
+
+    final String parentSpanId = spans.stream().filter(parentSpanMatcher::matches)
+      .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new);
+
+    assertThat(spans,
+      hasItem(allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())),
+        hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.OK), hasEnded())));
+  }
+
+  @Override
+  protected void assertTraceError(Matcher<String> exceptionTypeNameMatcher) {
+    final String parentSpanName = testName.getMethodName();
+    final Matcher<SpanData> parentSpanMatcher = allOf(hasName(parentSpanName), hasEnded());
+    waitForSpan(parentSpanMatcher);
+
+    final List<SpanData> spans =
+      otelClassRule.getSpans().stream().filter(Objects::nonNull).collect(Collectors.toList());
+    if (logger.isDebugEnabled()) {
+      StringTraceRenderer stringTraceRenderer = new StringTraceRenderer(spans);
+      stringTraceRenderer.render(logger::debug);
+    }
+
+    final String parentSpanId = spans.stream().filter(parentSpanMatcher::matches)
+      .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new);
+
+    final Matcher<SpanData> scanOperationSpanMatcher =
+      allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())),
+        hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.ERROR),
+        hasExceptionWithType(exceptionTypeNameMatcher), hasEnded());
+    assertThat(spans, hasItem(scanOperationSpanMatcher));
+  }
 }
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -17,22 +17,42 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasEnded;
+import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasExceptionWithType;
+import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasName;
+import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasParentSpanId;
+import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasStatusWithCode;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.hasItem;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.startsWith;
+
+import io.opentelemetry.api.trace.StatusCode;
+import io.opentelemetry.sdk.trace.data.SpanData;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Objects;
 import java.util.function.Supplier;
+import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.client.trace.StringTraceRenderer;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.hamcrest.Matcher;
 import org.junit.ClassRule;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameter;
 import org.junit.runners.Parameterized.Parameters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @RunWith(Parameterized.class)
 @Category({ LargeTests.class, ClientTests.class })
 public class TestRawAsyncTableScan extends AbstractTestAsyncTableScan {
+  private static final Logger logger = LoggerFactory.getLogger(TestRawAsyncTableScan.class);
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
@@ -56,8 +76,8 @@ public class TestRawAsyncTableScan extends AbstractTestAsyncTableScan {
 
   @Override
   protected List<Result> doScan(Scan scan, int closeAfter) throws Exception {
-    BufferingScanResultConsumer scanConsumer = new BufferingScanResultConsumer();
-    ASYNC_CONN.getTable(TABLE_NAME).scan(scan, scanConsumer);
+    TracedAdvancedScanResultConsumer scanConsumer = new TracedAdvancedScanResultConsumer();
+    connectionRule.getAsyncConnection().getTable(TABLE_NAME).scan(scan, scanConsumer);
     List<Result> results = new ArrayList<>();
     // these tests batch settings with the sample data result in each result being
     // split in two. so we must allow twice the expected results in order to reach
@@ -76,4 +96,79 @@ public class TestRawAsyncTableScan extends AbstractTestAsyncTableScan {
     }
     return results;
   }
+
+  @Override
+  protected void assertTraceContinuity() {
+    final String parentSpanName = testName.getMethodName();
+    final Matcher<SpanData> parentSpanMatcher =
+      allOf(hasName(parentSpanName), hasStatusWithCode(StatusCode.OK), hasEnded());
+    waitForSpan(parentSpanMatcher);
+
+    final List<SpanData> spans =
+      otelClassRule.getSpans().stream().filter(Objects::nonNull).collect(Collectors.toList());
+    if (logger.isDebugEnabled()) {
+      StringTraceRenderer stringTraceRenderer = new StringTraceRenderer(spans);
+      stringTraceRenderer.render(logger::debug);
+    }
+
+    final String parentSpanId = spans.stream().filter(parentSpanMatcher::matches)
+      .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new);
+
+    final Matcher<SpanData> scanOperationSpanMatcher =
+      allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())),
+        hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.OK), hasEnded());
+    assertThat(spans, hasItem(scanOperationSpanMatcher));
+    final String scanOperationSpanId = spans.stream().filter(scanOperationSpanMatcher::matches)
+      .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new);
+
+    // RawAsyncTableImpl never invokes the callback to `onScanMetricsCreated` -- bug?
+    final Matcher<SpanData> onScanMetricsCreatedMatcher =
+      hasName("TracedAdvancedScanResultConsumer#onScanMetricsCreated");
+    assertThat(spans, not(hasItem(onScanMetricsCreatedMatcher)));
+
+    final Matcher<SpanData> onNextMatcher = hasName("TracedAdvancedScanResultConsumer#onNext");
+    assertThat(spans, hasItem(onNextMatcher));
+    spans.stream().filter(onNextMatcher::matches)
+      .forEach(span -> assertThat(span, hasParentSpanId(scanOperationSpanId)));
+    assertThat(spans, hasItem(allOf(onNextMatcher, hasParentSpanId(scanOperationSpanId),
+      hasStatusWithCode(StatusCode.OK), hasEnded())));
+
+    final Matcher<SpanData> onCompleteMatcher =
+      hasName("TracedAdvancedScanResultConsumer#onComplete");
+    assertThat(spans, hasItem(onCompleteMatcher));
+    spans.stream().filter(onCompleteMatcher::matches)
+      .forEach(span -> assertThat(span, allOf(onCompleteMatcher,
+        hasParentSpanId(scanOperationSpanId), hasStatusWithCode(StatusCode.OK), hasEnded())));
+  }
+
+  @Override
+  protected void assertTraceError(Matcher<String> exceptionTypeNameMatcher) {
+    final String parentSpanName = testName.getMethodName();
+    final Matcher<SpanData> parentSpanMatcher = allOf(hasName(parentSpanName), hasEnded());
+    waitForSpan(parentSpanMatcher);
+
+    final List<SpanData> spans =
+      otelClassRule.getSpans().stream().filter(Objects::nonNull).collect(Collectors.toList());
+    if (logger.isDebugEnabled()) {
+      StringTraceRenderer stringTraceRenderer = new StringTraceRenderer(spans);
+      stringTraceRenderer.render(logger::debug);
+    }
+
+    final String parentSpanId = spans.stream().filter(parentSpanMatcher::matches)
+      .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new);
+
+    final Matcher<SpanData> scanOperationSpanMatcher =
+      allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())),
+        hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.ERROR),
+        hasExceptionWithType(exceptionTypeNameMatcher), hasEnded());
+    assertThat(spans, hasItem(scanOperationSpanMatcher));
+    final String scanOperationSpanId = spans.stream().filter(scanOperationSpanMatcher::matches)
+      .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new);
+
+    final Matcher<SpanData> onCompleteMatcher = hasName("TracedAdvancedScanResultConsumer#onError");
+    assertThat(spans, hasItem(onCompleteMatcher));
+    spans.stream().filter(onCompleteMatcher::matches)
+      .forEach(span -> assertThat(span, allOf(onCompleteMatcher,
+        hasParentSpanId(scanOperationSpanId), hasStatusWithCode(StatusCode.OK), hasEnded())));
+  }
 }
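Note the one inverted assertion in the class above: because RawAsyncTableImpl currently never invokes onScanMetricsCreated (flagged in the inline comment as a possible bug), the test pins down the absence of that span rather than its parentage:

  assertThat(spans,
    not(hasItem(hasName("TracedAdvancedScanResultConsumer#onScanMetricsCreated"))));
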
@ -0,0 +1,292 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasEnded;
import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasName;
import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasParentSpanId;
import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasStatusWithCode;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.emptyIterable;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.startsWith;

import io.opentelemetry.api.trace.StatusCode;
import io.opentelemetry.sdk.trace.data.SpanData;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ConnectionRule;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MatcherPredicate;
import org.apache.hadoop.hbase.MiniClusterRule;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.trace.StringTraceRenderer;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.trace.OpenTelemetryClassRule;
import org.apache.hadoop.hbase.trace.OpenTelemetryTestRule;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.hamcrest.Matcher;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.ExternalResource;
import org.junit.rules.RuleChain;
import org.junit.rules.TestName;
import org.junit.rules.TestRule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@Category({ LargeTests.class, ClientTests.class })
public class TestResultScannerTracing {
  private static final Logger LOG = LoggerFactory.getLogger(TestResultScannerTracing.class);

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestResultScannerTracing.class);

  private static final TableName TABLE_NAME =
    TableName.valueOf(TestResultScannerTracing.class.getSimpleName());
  private static final byte[] FAMILY = Bytes.toBytes("f");
  private static final byte[] CQ = Bytes.toBytes("q");
  private static final int COUNT = 1000;

  private static final OpenTelemetryClassRule otelClassRule = OpenTelemetryClassRule.create();
  private static final MiniClusterRule miniClusterRule = MiniClusterRule.newBuilder()
    .setMiniClusterOption(StartMiniClusterOption.builder().numRegionServers(3).build()).build();

  private static final ConnectionRule connectionRule =
    ConnectionRule.createConnectionRule(miniClusterRule::createConnection);

  private static final class Setup extends ExternalResource {

    private Connection conn;

    @Override
    protected void before() throws Throwable {
      final HBaseTestingUtility testUtil = miniClusterRule.getTestingUtility();
      conn = testUtil.getConnection();

      // Eight split points "111", "222", ..., "888" yield nine regions spread
      // across the three region servers started above.
      byte[][] splitKeys = new byte[8][];
      for (int i = 111; i < 999; i += 111) {
        splitKeys[i / 111 - 1] = Bytes.toBytes(String.format("%03d", i));
      }
      testUtil.createTable(TABLE_NAME, FAMILY, splitKeys);
      testUtil.waitTableAvailable(TABLE_NAME);
      try (final Table table = conn.getTable(TABLE_NAME)) {
        table.put(
          IntStream.range(0, COUNT).mapToObj(i -> new Put(Bytes.toBytes(String.format("%03d", i)))
            .addColumn(FAMILY, CQ, Bytes.toBytes(i))).collect(Collectors.toList()));
      }
    }

    @Override
    protected void after() {
      try (Admin admin = conn.getAdmin()) {
        if (!admin.tableExists(TABLE_NAME)) {
          return;
        }
        admin.disableTable(TABLE_NAME);
        admin.deleteTable(TABLE_NAME);
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }

  @ClassRule
  public static final TestRule classRule = RuleChain.outerRule(otelClassRule)
    .around(miniClusterRule).around(connectionRule).around(new Setup());

  @Rule
  public final OpenTelemetryTestRule otelTestRule = new OpenTelemetryTestRule(otelClassRule);

  @Rule
  public final TestName testName = new TestName();

  @Before
  public void before() throws Exception {
    // Clear the location cache so each test performs a fresh hbase:meta lookup,
    // which the tests below assert appears as a child span.
    final Connection conn = connectionRule.getConnection();
    try (final RegionLocator locator = conn.getRegionLocator(TABLE_NAME)) {
      locator.clearRegionLocationCache();
    }
  }

  private static void waitForSpan(final Matcher<SpanData> parentSpanMatcher) {
    final Configuration conf = miniClusterRule.getTestingUtility().getConfiguration();
    Waiter.waitFor(conf, TimeUnit.SECONDS.toMillis(5), new MatcherPredicate<>(
      "Span for test failed to complete.", otelClassRule::getSpans, hasItem(parentSpanMatcher)));
  }

  private Scan buildDefaultScan() {
    return new Scan().withStartRow(Bytes.toBytes(String.format("%03d", 1)))
      .withStopRow(Bytes.toBytes(String.format("%03d", 998)));
  }

  private void assertDefaultScan(final Scan scan) {
    assertThat(scan.isReversed(), is(false));
    assertThat(scan.isAsyncPrefetch(), nullValue());
  }

  private Scan buildAsyncPrefetchScan() {
    return new Scan().withStartRow(Bytes.toBytes(String.format("%03d", 1)))
      .withStopRow(Bytes.toBytes(String.format("%03d", 998))).setAsyncPrefetch(true);
  }

  private void assertAsyncPrefetchScan(final Scan scan) {
    assertThat(scan.isReversed(), is(false));
    assertThat(scan.isAsyncPrefetch(), is(true));
  }

  private Scan buildReversedScan() {
    return new Scan().withStartRow(Bytes.toBytes(String.format("%03d", 998)))
      .withStopRow(Bytes.toBytes(String.format("%03d", 1))).setReversed(true);
  }

  private void assertReversedScan(final Scan scan) {
    assertThat(scan.isReversed(), is(true));
    assertThat(scan.isAsyncPrefetch(), nullValue());
  }

  private void doScan(final Supplier<Scan> scanSupplier, final Consumer<Scan> scanAssertions)
      throws Exception {
    final Connection conn = connectionRule.getConnection();
    final Scan scan = scanSupplier.get();
    scanAssertions.accept(scan);
    // Fully drain the scanner so the complete sequence of scan RPCs is issued
    // before the span assertions run.
    try (final Table table = conn.getTable(TABLE_NAME);
        final ResultScanner scanner = table.getScanner(scan)) {
      final List<Result> results = new ArrayList<>(COUNT);
      scanner.forEach(results::add);
      assertThat(results, not(emptyIterable()));
    }
  }

  @Test
  public void testNormalScan() throws Exception {
    TraceUtil.trace(() -> doScan(this::buildDefaultScan, this::assertDefaultScan),
      testName.getMethodName());

    final String parentSpanName = testName.getMethodName();
    final Matcher<SpanData> parentSpanMatcher =
      allOf(hasName(parentSpanName), hasStatusWithCode(StatusCode.OK), hasEnded());
    waitForSpan(parentSpanMatcher);

    final List<SpanData> spans =
      otelClassRule.getSpans().stream().filter(Objects::nonNull).collect(Collectors.toList());
    if (LOG.isDebugEnabled()) {
      StringTraceRenderer stringTraceRenderer = new StringTraceRenderer(spans);
      stringTraceRenderer.render(LOG::debug);
    }

    // Verify the hierarchy one edge at a time: the test span parents the SCAN
    // table operation span, which in turn parents the hbase:meta scan.
    final String parentSpanId = spans.stream().filter(parentSpanMatcher::matches)
      .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new);

    final Matcher<SpanData> scanOperationSpanMatcher =
      allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())),
        hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.OK), hasEnded());
    assertThat(spans, hasItem(scanOperationSpanMatcher));
    final String scanOperationSpanId = spans.stream().filter(scanOperationSpanMatcher::matches)
      .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new);

    final Matcher<SpanData> childMetaScanSpanMatcher = allOf(hasName(startsWith("SCAN hbase:meta")),
      hasParentSpanId(scanOperationSpanId), hasStatusWithCode(StatusCode.OK), hasEnded());
    assertThat("expected a scan of hbase:meta", spans, hasItem(childMetaScanSpanMatcher));
  }

  @Test
  public void testAsyncPrefetchScan() throws Exception {
    TraceUtil.trace(() -> doScan(this::buildAsyncPrefetchScan, this::assertAsyncPrefetchScan),
      testName.getMethodName());

    final String parentSpanName = testName.getMethodName();
    final Matcher<SpanData> parentSpanMatcher =
      allOf(hasName(parentSpanName), hasStatusWithCode(StatusCode.OK), hasEnded());
    waitForSpan(parentSpanMatcher);

    final List<SpanData> spans =
      otelClassRule.getSpans().stream().filter(Objects::nonNull).collect(Collectors.toList());
    if (LOG.isDebugEnabled()) {
      StringTraceRenderer stringTraceRenderer = new StringTraceRenderer(spans);
      stringTraceRenderer.render(LOG::debug);
    }

    final String parentSpanId = spans.stream().filter(parentSpanMatcher::matches)
      .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new);

    final Matcher<SpanData> scanOperationSpanMatcher =
      allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())),
        hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.OK), hasEnded());
    assertThat(spans, hasItem(scanOperationSpanMatcher));
    final String scanOperationSpanId = spans.stream().filter(scanOperationSpanMatcher::matches)
      .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new);

    final Matcher<SpanData> childMetaScanSpanMatcher = allOf(hasName(startsWith("SCAN hbase:meta")),
      hasParentSpanId(scanOperationSpanId), hasStatusWithCode(StatusCode.OK), hasEnded());
    assertThat("expected a scan of hbase:meta", spans, hasItem(childMetaScanSpanMatcher));
  }

  @Test
  public void testReversedScan() throws Exception {
    TraceUtil.trace(() -> doScan(this::buildReversedScan, this::assertReversedScan),
      testName.getMethodName());

    final String parentSpanName = testName.getMethodName();
    final Matcher<SpanData> parentSpanMatcher =
      allOf(hasName(parentSpanName), hasStatusWithCode(StatusCode.OK), hasEnded());
    waitForSpan(parentSpanMatcher);

    final List<SpanData> spans =
      otelClassRule.getSpans().stream().filter(Objects::nonNull).collect(Collectors.toList());
    if (LOG.isDebugEnabled()) {
      StringTraceRenderer stringTraceRenderer = new StringTraceRenderer(spans);
      stringTraceRenderer.render(LOG::debug);
    }

    final String parentSpanId = spans.stream().filter(parentSpanMatcher::matches)
      .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new);

    final Matcher<SpanData> scanOperationSpanMatcher =
      allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())),
        hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.OK), hasEnded());
    assertThat(spans, hasItem(scanOperationSpanMatcher));
    final String scanOperationSpanId = spans.stream().filter(scanOperationSpanMatcher::matches)
      .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new);

    final Matcher<SpanData> childMetaScanSpanMatcher = allOf(hasName(startsWith("SCAN hbase:meta")),
      hasParentSpanId(scanOperationSpanId), hasStatusWithCode(StatusCode.OK), hasEnded());
    assertThat("expected a scan of hbase:meta", spans, hasItem(childMetaScanSpanMatcher));
  }
}
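
For orientation, the trace shape that all three tests assert looks like the sketch below. The names come from the matchers above; the tree is illustrative, not captured output:

// testNormalScan                        <- parent span opened by TraceUtil.trace(...)
// └── SCAN TestResultScannerTracing     <- the SCAN table operation span
//     └── SCAN hbase:meta               <- region lookup performed by the client
//
// Each edge is verified by resolving the parent's span id, then matching the
// child with hasParentSpanId(parentId) combined with hasName(...).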

@@ -0,0 +1,61 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.io.IOException;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.trace.TraceUtil;

/**
 * A drop-in replacement for {@link BufferingScanResultConsumer} that adds tracing spans to its
 * implementation of the {@link AdvancedScanResultConsumer} API.
 */
public class TracedAdvancedScanResultConsumer implements AdvancedScanResultConsumer {

  private final BufferingScanResultConsumer delegate = new BufferingScanResultConsumer();

  @Override
  public void onScanMetricsCreated(ScanMetrics scanMetrics) {
    TraceUtil.trace(() -> delegate.onScanMetricsCreated(scanMetrics),
      "TracedAdvancedScanResultConsumer#onScanMetricsCreated");
  }

  @Override
  public void onNext(Result[] results, ScanController controller) {
    TraceUtil.trace(() -> delegate.onNext(results, controller),
      "TracedAdvancedScanResultConsumer#onNext");
  }

  @Override
  public void onError(Throwable error) {
    TraceUtil.trace(() -> delegate.onError(error), "TracedAdvancedScanResultConsumer#onError");
  }

  @Override
  public void onComplete() {
    TraceUtil.trace(delegate::onComplete, "TracedAdvancedScanResultConsumer#onComplete");
  }

  public Result take() throws IOException, InterruptedException {
    return delegate.take();
  }

  public ScanMetrics getScanMetrics() {
    return delegate.getScanMetrics();
  }
}
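
A hedged usage sketch for this consumer follows. It is illustrative rather than part of the patch: `conn` and `tableName` are assumed to be supplied by the caller, and it assumes take() follows the buffered-consumer contract of returning null once the scan completes:

package org.apache.hadoop.hbase.client;

import org.apache.hadoop.hbase.TableName;

final class TracedAdvancedScanExample {
  static void scanAll(AsyncConnection conn, TableName tableName) throws Exception {
    TracedAdvancedScanResultConsumer consumer = new TracedAdvancedScanResultConsumer();
    // getTable(TableName) yields an AsyncTable<AdvancedScanResultConsumer> whose
    // callbacks run on the RPC thread, so the spans emitted by the consumer's
    // onNext/onComplete pick up the SCAN operation span as parent automatically.
    conn.getTable(tableName).scan(new Scan(), consumer);
    // Assumed contract: take() blocks for the next buffered Result and returns
    // null after onComplete() has fired.
    for (Result r; (r = consumer.take()) != null;) {
      // consume r
    }
  }
}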

@@ -0,0 +1,65 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.util.List;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.trace.TraceUtil;

/**
 * A wrapper over {@link SimpleScanResultConsumer} that adds tracing spans to its implementation.
 */
class TracedScanResultConsumer implements SimpleScanResultConsumer {

  private final SimpleScanResultConsumer delegate;

  public TracedScanResultConsumer(final SimpleScanResultConsumer delegate) {
    this.delegate = delegate;
  }

  @Override
  public void onScanMetricsCreated(ScanMetrics scanMetrics) {
    TraceUtil.trace(() -> delegate.onScanMetricsCreated(scanMetrics),
      "TracedScanResultConsumer#onScanMetricsCreated");
  }

  @Override
  public boolean onNext(Result result) {
    return TraceUtil.trace(() -> delegate.onNext(result), "TracedScanResultConsumer#onNext");
  }

  @Override
  public void onError(Throwable error) {
    TraceUtil.trace(() -> delegate.onError(error), "TracedScanResultConsumer#onError");
  }

  @Override
  public void onComplete() {
    TraceUtil.trace(delegate::onComplete, "TracedScanResultConsumer#onComplete");
  }

  @Override
  public List<Result> getAll() throws Exception {
    return delegate.getAll();
  }

  @Override
  public ScanMetrics getScanMetrics() {
    return delegate.getScanMetrics();
  }
}
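
Unlike the advanced consumer, a plain ScanResultConsumer has its callbacks dispatched on a user-supplied executor rather than the RPC thread, which is why this commit carries the span explicitly instead of reading it from the calling thread's context. A hedged wiring sketch, assuming a caller that already has a connection, pool, and delegate:

package org.apache.hadoop.hbase.client;

import java.util.List;
import java.util.concurrent.ExecutorService;
import org.apache.hadoop.hbase.TableName;

final class TracedScanExample {
  // Illustrative only: the delegate is any SimpleScanResultConsumer implementation.
  static List<Result> scanAll(AsyncConnection conn, TableName tableName, ExecutorService pool,
      SimpleScanResultConsumer delegate) throws Exception {
    // getTable(TableName, ExecutorService) yields an AsyncTable<ScanResultConsumer>
    // whose callbacks run on `pool`, not on the RPC thread.
    AsyncTable<ScanResultConsumer> table = conn.getTable(tableName, pool);
    table.scan(new Scan(), new TracedScanResultConsumer(delegate));
    // getAll() blocks until onComplete()/onError() and returns the buffered rows.
    return delegate.getAll();
  }
}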

pom.xml
@@ -692,7 +692,8 @@
     <hbase-surefire.cygwin-argLine>-enableassertions -Xmx${surefire.cygwinXmx}
       -Djava.security.egd=file:/dev/./urandom -Djava.net.preferIPv4Stack=true
       "-Djava.library.path=${hadoop.library.path};${java.library.path}"
-      -Dorg.apache.hbase.thirdparty.io.netty.leakDetection.level=advanced</hbase-surefire.cygwin-argLine>
+      -Dorg.apache.hbase.thirdparty.io.netty.leakDetection.level=advanced
+      -Dio.opentelemetry.context.enableStrictContext=true</hbase-surefire.cygwin-argLine>
     <!-- Surefire argLine defaults to Linux, cygwin argLine is used in the os.windows profile -->
     <argLine>${hbase-surefire.argLine}</argLine>
     <jacoco.version>0.7.5.201505241946</jacoco.version>
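
The new -Dio.opentelemetry.context.enableStrictContext=true property enables OpenTelemetry's strict context-storage checks during tests: a Scope opened via makeCurrent() but never closed is reported as an error instead of silently leaking the current context. A hypothetical illustration of the bug class it catches (the tracer name below is made up):

import io.opentelemetry.api.GlobalOpenTelemetry;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.Tracer;
import io.opentelemetry.context.Scope;

final class StrictContextExample {
  private static final Tracer TRACER = GlobalOpenTelemetry.getTracer("example"); // hypothetical name

  static void leaky() {
    Span span = TRACER.spanBuilder("SCAN").startSpan();
    Scope scope = span.makeCurrent(); // never closed: strict mode flags this leak
    span.end();
  }

  static void correct() {
    Span span = TRACER.spanBuilder("SCAN").startSpan();
    // try-with-resources guarantees the Scope is closed on the same thread.
    try (Scope ignored = span.makeCurrent()) {
      // work happens with `span` as the current span
    } finally {
      span.end();
    }
  }
}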