HBASE-22244 Make use of MetricsConnection in async client

commit a3d2a2df3a
parent f4aaf735e4
AsyncClientScanner.java

@@ -197,10 +197,9 @@ class AsyncClientScanner {
   private void openScanner() {
     incRegionCountMetrics(scanMetrics);
     openScannerTries.set(1);
-    addListener(
-      timelineConsistentRead(conn.getLocator(), tableName, scan, scan.getStartRow(),
-        getLocateType(scan), this::openScanner, rpcTimeoutNs, getPrimaryTimeoutNs(), retryTimer),
-      (resp, error) -> {
+    addListener(timelineConsistentRead(conn.getLocator(), tableName, scan, scan.getStartRow(),
+      getLocateType(scan), this::openScanner, rpcTimeoutNs, getPrimaryTimeoutNs(), retryTimer,
+      conn.getConnectionMetrics()), (resp, error) -> {
         if (error != null) {
           consumer.onError(error);
           return;
AsyncConnectionImpl.java

@@ -19,10 +19,12 @@ package org.apache.hadoop.hbase.client;

 import static org.apache.hadoop.hbase.client.ConnectionUtils.NO_NONCE_GENERATOR;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.getStubKey;
+import static org.apache.hadoop.hbase.client.MetricsConnection.CLIENT_SIDE_METRICS_ENABLED_KEY;
 import static org.apache.hadoop.hbase.client.NonceGenerator.CLIENT_NONCES_ENABLED_KEY;
 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;

 import java.io.IOException;
+import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;

@@ -103,6 +105,8 @@ class AsyncConnectionImpl implements AsyncConnection {

   private volatile boolean closed = false;

+  private final Optional<MetricsConnection> metrics;
+
   public AsyncConnectionImpl(Configuration conf, AsyncRegistry registry, String clusterId,
       User user) {
     this.conf = conf;

@@ -112,7 +116,12 @@ class AsyncConnectionImpl implements AsyncConnection {
     }
     this.connConf = new AsyncConnectionConfiguration(conf);
     this.registry = registry;
-    this.rpcClient = RpcClientFactory.createClient(conf, clusterId);
+    if (conf.getBoolean(CLIENT_SIDE_METRICS_ENABLED_KEY, false)) {
+      this.metrics = Optional.of(new MetricsConnection(this.toString(), () -> null, () -> null));
+    } else {
+      this.metrics = Optional.empty();
+    }
+    this.rpcClient = RpcClientFactory.createClient(conf, clusterId, metrics.orElse(null));
     this.rpcControllerFactory = RpcControllerFactory.instantiate(conf);
     this.hostnameCanChange = conf.getBoolean(RESOLVE_HOSTNAME_ON_FAIL_KEY, true);
     this.rpcTimeout =

@@ -148,6 +157,7 @@ class AsyncConnectionImpl implements AsyncConnection {
     if (authService != null) {
       authService.shutdown();
     }
+    metrics.ifPresent(MetricsConnection::shutdown);
     closed = true;
   }

@@ -312,4 +322,8 @@ class AsyncConnectionImpl implements AsyncConnection {
   public void clearRegionLocationCache() {
     locator.clearCache();
   }
+
+  Optional<MetricsConnection> getConnectionMetrics() {
+    return metrics;
+  }
 }
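Note: with this change the async client honors the same opt-in switch as the blocking client. A minimal sketch of enabling it from user code, assuming the standard ConnectionFactory entry point ("hbase.client.metrics.enable" is the value behind the CLIENT_SIDE_METRICS_ENABLED_KEY constant imported above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class EnableAsyncClientMetrics {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // "hbase.client.metrics.enable" is the value behind
        // MetricsConnection.CLIENT_SIDE_METRICS_ENABLED_KEY.
        conf.setBoolean("hbase.client.metrics.enable", true);
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
          // Meta cache hit/miss, cache-drop, and hedged read metrics are now
          // recorded for operations issued through this connection.
        }
      }
    }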
AsyncMetaRegionLocator.java

@@ -23,6 +23,7 @@ import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.isGood;
 import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.removeRegionLocation;
 import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.replaceRegionLocation;

+import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.atomic.AtomicReference;
 import org.apache.hadoop.hbase.HRegionLocation;

@@ -106,7 +107,7 @@ class AsyncMetaRegionLocator {

   void updateCachedLocationOnError(HRegionLocation loc, Throwable exception) {
     AsyncRegionLocatorHelper.updateCachedLocationOnError(loc, exception, this::getCacheLocation,
-      this::addLocationToCache, this::removeLocationFromCache);
+      this::addLocationToCache, this::removeLocationFromCache, Optional.empty());
   }

   void clearCache() {
AsyncNonMetaRegionLocator.java

@@ -338,15 +338,25 @@ class AsyncNonMetaRegionLocator {
     return true;
   }

+  private void recordCacheHit() {
+    conn.getConnectionMetrics().ifPresent(MetricsConnection::incrMetaCacheHit);
+  }
+
+  private void recordCacheMiss() {
+    conn.getConnectionMetrics().ifPresent(MetricsConnection::incrMetaCacheMiss);
+  }
+
   private RegionLocations locateRowInCache(TableCache tableCache, TableName tableName, byte[] row,
       int replicaId) {
     Map.Entry<byte[], RegionLocations> entry = tableCache.cache.floorEntry(row);
     if (entry == null) {
+      recordCacheMiss();
       return null;
     }
     RegionLocations locs = entry.getValue();
     HRegionLocation loc = locs.getRegionLocation(replicaId);
     if (loc == null) {
+      recordCacheMiss();
       return null;
     }
     byte[] endKey = loc.getRegion().getEndKey();

@@ -355,8 +365,10 @@ class AsyncNonMetaRegionLocator {
       LOG.trace("Found {} in cache for {}, row='{}', locateType={}, replicaId={}", loc, tableName,
         Bytes.toStringBinary(row), RegionLocateType.CURRENT, replicaId);
     }
+    recordCacheHit();
     return locs;
   } else {
+    recordCacheMiss();
     return null;
   }
 }

@@ -367,11 +379,13 @@ class AsyncNonMetaRegionLocator {
     Map.Entry<byte[], RegionLocations> entry =
       isEmptyStopRow ? tableCache.cache.lastEntry() : tableCache.cache.lowerEntry(row);
     if (entry == null) {
+      recordCacheMiss();
       return null;
     }
     RegionLocations locs = entry.getValue();
     HRegionLocation loc = locs.getRegionLocation(replicaId);
     if (loc == null) {
+      recordCacheMiss();
       return null;
     }
     if (isEmptyStopRow(loc.getRegion().getEndKey()) ||

@@ -380,8 +394,10 @@ class AsyncNonMetaRegionLocator {
       LOG.trace("Found {} in cache for {}, row='{}', locateType={}, replicaId={}", loc, tableName,
         Bytes.toStringBinary(row), RegionLocateType.BEFORE, replicaId);
     }
+    recordCacheHit();
     return locs;
   } else {
+    recordCacheMiss();
     return null;
   }
 }

@@ -529,6 +545,10 @@ class AsyncNonMetaRegionLocator {
     return getRegionLocationsInternal(tableName, row, replicaId, locateType, reload);
   }

+  private void recordClearRegionCache() {
+    conn.getConnectionMetrics().ifPresent(MetricsConnection::incrMetaCacheNumClearRegion);
+  }
+
   private void removeLocationFromCache(HRegionLocation loc) {
     TableCache tableCache = cache.get(loc.getRegion().getTable());
     if (tableCache == null) {

@@ -544,10 +564,12 @@ class AsyncNonMetaRegionLocator {
     RegionLocations newLocs = removeRegionLocation(oldLocs, loc.getRegion().getReplicaId());
     if (newLocs == null) {
       if (tableCache.cache.remove(startKey, oldLocs)) {
+        recordClearRegionCache();
         return;
       }
     } else {
       if (tableCache.cache.replace(startKey, oldLocs, newLocs)) {
+        recordClearRegionCache();
         return;
       }
     }

@@ -569,7 +591,7 @@ class AsyncNonMetaRegionLocator {

   void updateCachedLocationOnError(HRegionLocation loc, Throwable exception) {
     AsyncRegionLocatorHelper.updateCachedLocationOnError(loc, exception, this::getCachedLocation,
-      this::addLocationToCache, this::removeLocationFromCache);
+      this::addLocationToCache, this::removeLocationFromCache, conn.getConnectionMetrics());
   }

   void clearCache(TableName tableName) {

@@ -583,6 +605,8 @@ class AsyncNonMetaRegionLocator {
         tableCache.allRequests.values().forEach(f -> f.completeExceptionally(error));
       }
     }
+    conn.getConnectionMetrics()
+      .ifPresent(metrics -> metrics.incrMetaCacheNumClearRegion(tableCache.cache.size()));
   }

   void clearCache() {
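Note: the recordCacheHit/recordCacheMiss helpers keep the hot path cheap when metrics are disabled, since each call reduces to a single Optional.ifPresent check on an empty Optional. An illustrative, self-contained sketch of the same pattern (the Metrics interface and String-keyed cache are stand-ins, not HBase API):

    import java.util.Map;
    import java.util.NavigableMap;
    import java.util.Optional;
    import java.util.TreeMap;

    public class CacheMetricsPatternSketch {
      interface Metrics { // stand-in for MetricsConnection
        void incrMetaCacheHit();
        void incrMetaCacheMiss();
      }

      private final NavigableMap<String, String> cache = new TreeMap<>();
      private final Optional<Metrics> metrics;

      CacheMetricsPatternSketch(Optional<Metrics> metrics) {
        this.metrics = metrics;
      }

      String locateRowInCache(String row) {
        Map.Entry<String, String> entry = cache.floorEntry(row);
        if (entry == null) {
          // every early return that fails to find a location counts as a miss
          metrics.ifPresent(Metrics::incrMetaCacheMiss);
          return null;
        }
        metrics.ifPresent(Metrics::incrMetaCacheHit);
        return entry.getValue();
      }

      public static void main(String[] args) {
        CacheMetricsPatternSketch locator = new CacheMetricsPatternSketch(Optional.empty());
        System.out.println(locator.locateRowInCache("row1")); // null; a miss when metrics present
      }
    }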
AsyncRegionLocatorHelper.java

@@ -21,6 +21,7 @@ import static org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil.findException;
 import static org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil.isMetaClearingException;

 import java.util.Arrays;
+import java.util.Optional;
 import java.util.function.Consumer;
 import java.util.function.Function;
 import org.apache.commons.lang3.ObjectUtils;

@@ -51,7 +52,8 @@ final class AsyncRegionLocatorHelper {

   static void updateCachedLocationOnError(HRegionLocation loc, Throwable exception,
       Function<HRegionLocation, HRegionLocation> cachedLocationSupplier,
-      Consumer<HRegionLocation> addToCache, Consumer<HRegionLocation> removeFromCache) {
+      Consumer<HRegionLocation> addToCache, Consumer<HRegionLocation> removeFromCache,
+      Optional<MetricsConnection> metrics) {
     HRegionLocation oldLoc = cachedLocationSupplier.apply(loc);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Try updating {} , the old value is {}, error={}", loc, oldLoc,

@@ -78,6 +80,7 @@ final class AsyncRegionLocatorHelper {
       addToCache.accept(newLoc);
     } else {
       LOG.debug("Try removing {} from cache", loc);
+      metrics.ifPresent(m -> m.incrCacheDroppingExceptions(exception));
       removeFromCache.accept(loc);
     }
   }
ConnectionImplementation.java

@@ -190,10 +190,10 @@ class ConnectionImplementation implements ClusterConnection, Closeable {

   // thread executor shared by all Table instances created
   // by this connection
-  private volatile ExecutorService batchPool = null;
+  private volatile ThreadPoolExecutor batchPool = null;
   // meta thread executor shared by all Table instances created
   // by this connection
-  private volatile ExecutorService metaLookupPool = null;
+  private volatile ThreadPoolExecutor metaLookupPool = null;
   private volatile boolean cleanupPool = false;

   private final Configuration conf;

@@ -238,14 +238,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
    * constructor
    * @param conf Configuration object
    */
-  ConnectionImplementation(Configuration conf,
-      ExecutorService pool, User user) throws IOException {
+  ConnectionImplementation(Configuration conf, ExecutorService pool, User user) throws IOException {
     this.conf = conf;
     this.user = user;
     if (user != null && user.isLoginFromKeytab()) {
       spawnRenewalChore(user.getUGI());
     }
-    this.batchPool = pool;
+    this.batchPool = (ThreadPoolExecutor) pool;
     this.connectionConfig = new ConnectionConfiguration(conf);
     this.closed = false;
     this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,

@@ -286,7 +285,8 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     this.backoffPolicy = ClientBackoffPolicyFactory.create(conf);
     this.asyncProcess = new AsyncProcess(this, conf, rpcCallerFactory, rpcControllerFactory);
     if (conf.getBoolean(CLIENT_SIDE_METRICS_ENABLED_KEY, false)) {
-      this.metrics = new MetricsConnection(this);
+      this.metrics =
+        new MetricsConnection(this.toString(), this::getBatchPool, this::getMetaLookupPool);
     } else {
       this.metrics = null;
     }

@@ -461,7 +461,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     return this.metrics;
   }

-  private ExecutorService getBatchPool() {
+  private ThreadPoolExecutor getBatchPool() {
     if (batchPool == null) {
       synchronized (this) {
         if (batchPool == null) {

@@ -474,7 +474,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     return this.batchPool;
   }

-  private ExecutorService getThreadPool(int maxThreads, int coreThreads, String nameHint,
+  private ThreadPoolExecutor getThreadPool(int maxThreads, int coreThreads, String nameHint,
       BlockingQueue<Runnable> passedWorkQueue) {
     // shared HTable thread executor not yet initialized
     if (maxThreads == 0) {

@@ -503,7 +503,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     return tpe;
   }

-  private ExecutorService getMetaLookupPool() {
+  private ThreadPoolExecutor getMetaLookupPool() {
     if (this.metaLookupPool == null) {
       synchronized (this) {
         if (this.metaLookupPool == null) {
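Note: the pool fields and getters are narrowed from ExecutorService to ThreadPoolExecutor because the RatioGauge in MetricsConnection needs pool statistics that only ThreadPoolExecutor exposes; the cast moves from inside the gauge to the constructor. A tiny standalone illustration (names are illustrative):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ThreadPoolExecutor;

    public class PoolStatsSketch {
      public static void main(String[] args) {
        // ExecutorService exposes no pool statistics; ThreadPoolExecutor does,
        // and those are what the executor/meta pool gauges report.
        ThreadPoolExecutor pool = (ThreadPoolExecutor) Executors.newFixedThreadPool(4);
        System.out.println(pool.getActiveCount() + " / " + pool.getMaximumPoolSize());
        pool.shutdown();
      }
    }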
ConnectionUtils.java

@@ -28,6 +28,7 @@ import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.ThreadLocalRandom;

@@ -500,13 +501,19 @@ public final class ConnectionUtils {
   /**
    * Connect the two futures, if the src future is done, then mark the dst future as done. And if
    * the dst future is done, then cancel the src future. This is used for timeline consistent read.
+   * <p/>
+   * Pass empty metrics if you want to link the primary future and the dst future so we will not
+   * increase the hedge read related metrics.
    */
-  private static <T> void connect(CompletableFuture<T> srcFuture, CompletableFuture<T> dstFuture) {
+  private static <T> void connect(CompletableFuture<T> srcFuture, CompletableFuture<T> dstFuture,
+      Optional<MetricsConnection> metrics) {
     addListener(srcFuture, (r, e) -> {
       if (e != null) {
         dstFuture.completeExceptionally(e);
       } else {
-        dstFuture.complete(r);
+        if (dstFuture.complete(r)) {
+          metrics.ifPresent(MetricsConnection::incrHedgedReadWin);
+        }
       }
     });
     // The cancellation may be a dummy one as the dstFuture may be completed by this srcFuture.

@@ -519,7 +526,7 @@ public final class ConnectionUtils {

   private static <T> void sendRequestsToSecondaryReplicas(
       Function<Integer, CompletableFuture<T>> requestReplica, RegionLocations locs,
-      CompletableFuture<T> future) {
+      CompletableFuture<T> future, Optional<MetricsConnection> metrics) {
     if (future.isDone()) {
       // do not send requests to secondary replicas if the future is done, i.e, the primary request
       // has already been finished.

@@ -527,14 +534,15 @@ public final class ConnectionUtils {
     }
     for (int replicaId = 1, n = locs.size(); replicaId < n; replicaId++) {
       CompletableFuture<T> secondaryFuture = requestReplica.apply(replicaId);
-      connect(secondaryFuture, future);
+      metrics.ifPresent(MetricsConnection::incrHedgedReadOps);
+      connect(secondaryFuture, future, metrics);
     }
   }

   static <T> CompletableFuture<T> timelineConsistentRead(AsyncRegionLocator locator,
       TableName tableName, Query query, byte[] row, RegionLocateType locateType,
       Function<Integer, CompletableFuture<T>> requestReplica, long rpcTimeoutNs,
-      long primaryCallTimeoutNs, Timer retryTimer) {
+      long primaryCallTimeoutNs, Timer retryTimer, Optional<MetricsConnection> metrics) {
     if (query.getConsistency() == Consistency.STRONG) {
       return requestReplica.apply(RegionReplicaUtil.DEFAULT_REPLICA_ID);
     }

@@ -545,7 +553,7 @@ public final class ConnectionUtils {
     // Timeline consistent read, where we may send requests to other region replicas
     CompletableFuture<T> primaryFuture = requestReplica.apply(RegionReplicaUtil.DEFAULT_REPLICA_ID);
     CompletableFuture<T> future = new CompletableFuture<>();
-    connect(primaryFuture, future);
+    connect(primaryFuture, future, Optional.empty());
     long startNs = System.nanoTime();
     // after the getRegionLocations, all the locations for the replicas of this region should have
     // been cached, so it is not big deal to locate them again when actually sending requests to

@@ -567,11 +575,11 @@ public final class ConnectionUtils {
       }
       long delayNs = primaryCallTimeoutNs - (System.nanoTime() - startNs);
       if (delayNs <= 0) {
-        sendRequestsToSecondaryReplicas(requestReplica, locs, future);
+        sendRequestsToSecondaryReplicas(requestReplica, locs, future, metrics);
       } else {
         retryTimer.newTimeout(
-          timeout -> sendRequestsToSecondaryReplicas(requestReplica, locs, future), delayNs,
-          TimeUnit.NANOSECONDS);
+          timeout -> sendRequestsToSecondaryReplicas(requestReplica, locs, future, metrics),
+          delayNs, TimeUnit.NANOSECONDS);
       }
     });
     return future;
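Note: the win accounting hinges on CompletableFuture.complete(r) returning true only for the call that actually transitions the future to done, so incrHedgedReadWin fires at most once, and only for a secondary replica (the primary is connected with Optional.empty() and is never counted). A standalone sketch of that race, with illustrative names:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.atomic.AtomicInteger;

    public class HedgedReadWinSketch {
      public static void main(String[] args) throws Exception {
        AtomicInteger hedgedReadWins = new AtomicInteger();
        CompletableFuture<String> result = new CompletableFuture<>();

        CompletableFuture<String> primary = CompletableFuture.supplyAsync(() -> "primary");
        CompletableFuture<String> secondary = CompletableFuture.supplyAsync(() -> "secondary");

        // Primary is connected without metrics, mirroring
        // connect(primaryFuture, future, Optional.empty()) above.
        primary.thenAccept(result::complete);
        // A secondary counts a win only if it is the call that completes the future.
        secondary.thenAccept(r -> {
          if (result.complete(r)) {
            hedgedReadWins.incrementAndGet();
          }
        });

        System.out.println("winner=" + result.get() + ", hedged wins=" + hedgedReadWins.get());
      }
    }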
MetricsConnection.java

@@ -33,7 +33,7 @@ import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor;

@@ -305,30 +305,30 @@ public class MetricsConnection implements StatisticTrackable {
   private final ConcurrentMap<String, Counter> cacheDroppingExceptions =
     new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL);

-  MetricsConnection(final ConnectionImplementation conn) {
-    this.scope = conn.toString();
+  MetricsConnection(String scope, Supplier<ThreadPoolExecutor> batchPool,
+      Supplier<ThreadPoolExecutor> metaPool) {
+    this.scope = scope;
     this.registry = new MetricRegistry();

     this.registry.register(getExecutorPoolName(),
       new RatioGauge() {
         @Override
         protected Ratio getRatio() {
-          ThreadPoolExecutor batchPool = (ThreadPoolExecutor) conn.getCurrentBatchPool();
-          if (batchPool == null) {
+          ThreadPoolExecutor pool = batchPool.get();
+          if (pool == null) {
             return Ratio.of(0, 0);
           }
-          return Ratio.of(batchPool.getActiveCount(), batchPool.getMaximumPoolSize());
+          return Ratio.of(pool.getActiveCount(), pool.getMaximumPoolSize());
         }
       });
     this.registry.register(getMetaPoolName(),
       new RatioGauge() {
         @Override
         protected Ratio getRatio() {
-          ThreadPoolExecutor metaPool = (ThreadPoolExecutor) conn.getCurrentMetaLookupPool();
-          if (metaPool == null) {
+          ThreadPoolExecutor pool = metaPool.get();
+          if (pool == null) {
             return Ratio.of(0, 0);
           }
-          return Ratio.of(metaPool.getActiveCount(), metaPool.getMaximumPoolSize());
+          return Ratio.of(pool.getActiveCount(), pool.getMaximumPoolSize());
         }
       });
     this.metaCacheHits = registry.counter(name(this.getClass(), "metaCacheHits", scope));

@@ -401,6 +401,11 @@ public class MetricsConnection implements StatisticTrackable {
     metaCacheNumClearRegion.inc();
   }

+  /** Increment the number of meta cache drops requested for individual region. */
+  public void incrMetaCacheNumClearRegion(int count) {
+    metaCacheNumClearRegion.inc(count);
+  }
+
   /** Increment the number of hedged read that have occurred. */
   public void incrHedgedReadOps() {
     hedgedReadOps.inc();
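Note: depending on a scope string and two Supplier<ThreadPoolExecutor> arguments instead of ConnectionImplementation itself is what lets AsyncConnectionImpl (which has no such pools) pass () -> null, and lets the test below drop its Mockito setup; a null from a supplier is treated as an empty pool by the gauges. A minimal sketch under the new signature (scope name illustrative; the constructor is package-private, so this would have to live in org.apache.hadoop.hbase.client):

    package org.apache.hadoop.hbase.client;

    import java.util.concurrent.Executors;
    import java.util.concurrent.ThreadPoolExecutor;

    public class MetricsConnectionUsageSketch {
      public static void main(String[] args) {
        ThreadPoolExecutor batchPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(2);
        // Pools are supplied lazily; the meta pool supplier here always returns null.
        MetricsConnection metrics =
          new MetricsConnection("example-connection", () -> batchPool, () -> null);
        metrics.shutdown(); // releases the registry/reporter resources
        batchPool.shutdown();
      }
    }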
RawAsyncTableImpl.java

@@ -232,7 +232,7 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
   public CompletableFuture<Result> get(Get get) {
     return timelineConsistentRead(conn.getLocator(), tableName, get, get.getRow(),
       RegionLocateType.CURRENT, replicaId -> get(get, replicaId), readRpcTimeoutNs,
-      conn.connConf.getPrimaryCallTimeoutNs(), retryTimer);
+      conn.connConf.getPrimaryCallTimeoutNs(), retryTimer, conn.getConnectionMetrics());
   }

   @Override
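Note: hedged reads, and therefore the new hedged-read metrics, only come into play for TIMELINE-consistency reads; a STRONG-consistency get short-circuits to the primary replica, as timelineConsistentRead above shows. A sketch of issuing a timeline-consistent get through the async API (the table and row names are hypothetical):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.AsyncTable;
    import org.apache.hadoop.hbase.client.Consistency;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TimelineGetSketch {
      static Result timelineGet(AsyncConnection conn) {
        AsyncTable<?> table = conn.getTable(TableName.valueOf("test_table"));
        Get get = new Get(Bytes.toBytes("row1"));
        get.setConsistency(Consistency.TIMELINE);
        // May be answered by a secondary replica; a secondary "win" is what
        // increments the hedgedReadWin counter added in this commit.
        return table.get(get).join();
      }
    }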
TestMetricsConnection.java

@@ -22,9 +22,8 @@ import static org.junit.Assert.assertEquals;
 import com.codahale.metrics.RatioGauge;
 import com.codahale.metrics.RatioGauge.Ratio;
 import java.io.IOException;
-import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
-import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.ThreadPoolExecutor;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MetricsTests;

@@ -35,7 +34,6 @@ import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.mockito.Mockito;

 import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;

@@ -57,13 +55,11 @@ public class TestMetricsConnection {
     HBaseClassTestRule.forClass(TestMetricsConnection.class);

   private static MetricsConnection METRICS;
-  private static final ExecutorService BATCH_POOL = Executors.newFixedThreadPool(2);
+  private static final ThreadPoolExecutor BATCH_POOL =
+    (ThreadPoolExecutor) Executors.newFixedThreadPool(2);

   @BeforeClass
   public static void beforeClass() {
-    ConnectionImplementation mocked = Mockito.mock(ConnectionImplementation.class);
-    Mockito.when(mocked.toString()).thenReturn("mocked-connection");
-    Mockito.when(mocked.getCurrentBatchPool()).thenReturn(BATCH_POOL);
-    METRICS = new MetricsConnection(mocked);
+    METRICS = new MetricsConnection("mocked-connection", () -> BATCH_POOL, () -> null);
   }

   @AfterClass