HBASE-21727 Simplify documentation around client timeout

Signed-off-by: Michael Stack <stack@apache.org>
This commit is contained in:
Peter Somogyi 2019-01-15 15:24:34 +01:00
parent 400276e3ec
commit 51c58e083c
8 changed files with 30 additions and 76 deletions

View File

@ -35,7 +35,6 @@ import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_SCANNER_CACHING;
import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY;
import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD;
import static org.apache.hadoop.hbase.HConstants.HBASE_META_SCANNER_CACHING;
import static org.apache.hadoop.hbase.HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY;
import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_READ_TIMEOUT_KEY;
import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_TIMEOUT_KEY;
import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY;
@ -52,7 +51,6 @@ import static org.apache.hadoop.hbase.client.ConnectionConfiguration.WRITE_BUFFE
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.yetus.audience.InterfaceAudience;
/**
@ -108,7 +106,6 @@ class AsyncConnectionConfiguration {
private final long primaryMetaScanTimeoutNs;
@SuppressWarnings("deprecation")
AsyncConnectionConfiguration(Configuration conf) {
this.metaOperationTimeoutNs = TimeUnit.MILLISECONDS.toNanos(
conf.getLong(HBASE_CLIENT_META_OPERATION_TIMEOUT, DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT));
@ -125,9 +122,9 @@ class AsyncConnectionConfiguration {
this.maxRetries = conf.getInt(HBASE_CLIENT_RETRIES_NUMBER, DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
this.startLogErrorsCnt =
conf.getInt(START_LOG_ERRORS_AFTER_COUNT_KEY, DEFAULT_START_LOG_ERRORS_AFTER_COUNT);
this.scanTimeoutNs = TimeUnit.MILLISECONDS
.toNanos(HBaseConfiguration.getInt(conf, HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
HBASE_REGIONSERVER_LEASE_PERIOD_KEY, DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD));
this.scanTimeoutNs = TimeUnit.MILLISECONDS.toNanos(
conf.getInt(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD));
this.scannerCaching =
conf.getInt(HBASE_CLIENT_SCANNER_CACHING, DEFAULT_HBASE_CLIENT_SCANNER_CACHING);
this.metaScannerCaching =

View File

@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.client.ScannerCallable.MoreResults;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
import org.apache.hadoop.hbase.exceptions.ScannerResetException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
@ -119,10 +118,8 @@ public abstract class ClientScanner extends AbstractClientScanner {
this.maxScannerResultSize = conf.getLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY,
HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE);
}
this.scannerTimeout =
HBaseConfiguration.getInt(conf, HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY,
HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);
this.scannerTimeout = conf.getInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);
// check if application wants to collect scan metrics
initScanMetrics(scan);

View File

@ -121,11 +121,12 @@ public class RpcRetryingCallerImpl<T> implements RpcRetryingCaller<T> {
if (tries > startLogErrorsCnt) {
if (LOG.isInfoEnabled()) {
StringBuilder builder = new StringBuilder("Call exception, tries=").append(tries)
.append(", retries=").append(maxAttempts).append(", started=")
.append((EnvironmentEdgeManager.currentTime() - tracker.getStartTime()))
.append(" ms ago, ").append("cancelled=").append(cancelled.get())
.append(", msg=").append(t.getMessage())
.append(", details=").append(callable.getExceptionMessageAdditionalDetail());
.append(", retries=").append(maxAttempts).append(", started=")
.append((EnvironmentEdgeManager.currentTime() - tracker.getStartTime()))
.append(" ms ago, ").append("cancelled=").append(cancelled.get())
.append(", msg=").append(t.getMessage())
.append(", details=").append(callable.getExceptionMessageAdditionalDetail())
.append(", see https://s.apache.org/timeout");
if (LOG.isDebugEnabled()) {
builder.append(", exception=").append(StringUtils.stringifyException(t));
LOG.debug(builder.toString());

View File

@ -169,35 +169,6 @@ public class HBaseConfiguration extends Configuration {
return isShowConf;
}
/**
* Get the value of the <code>name</code> property as an <code>int</code>, possibly
* referring to the deprecated name of the configuration property.
* If no such property exists, the provided default value is returned,
* or if the specified value is not a valid <code>int</code>,
* then an error is thrown.
*
* @param name property name.
* @param deprecatedName a deprecatedName for the property to use
* if non-deprecated name is not used
* @param defaultValue default value.
* @throws NumberFormatException when the value is invalid
* @return property value as an <code>int</code>,
* or <code>defaultValue</code>.
*/
// TODO: developer note: This duplicates the functionality of deprecated
// property support in Configuration in Hadoop 2. But since Hadoop-1 does not
// contain these changes, we will do our own as usual. Replace these when H2 is default.
public static int getInt(Configuration conf, String name,
String deprecatedName, int defaultValue) {
// The deprecated key, when explicitly present in the Configuration, takes
// precedence over the current key so existing deployments keep working;
// a warning points operators at the replacement name.
if (conf.get(deprecatedName) != null) {
LOG.warn(String.format("Config option \"%s\" is deprecated. Instead, use \"%s\""
, deprecatedName, name));
return conf.getInt(deprecatedName, defaultValue);
} else {
// Deprecated key absent: read the current key, falling back to defaultValue.
return conf.getInt(name, defaultValue);
}
}
/**
* Get the password from the Configuration instance using the
* getPassword method if it exists. If not, then fall back to the

View File

@ -309,7 +309,7 @@ public final class HConstants {
/** Parameter name for HBase client operation timeout. */
public static final String HBASE_CLIENT_OPERATION_TIMEOUT = "hbase.client.operation.timeout";
/** Parameter name for HBase client operation timeout. */
/** Parameter name for HBase client meta operation timeout. */
public static final String HBASE_CLIENT_META_OPERATION_TIMEOUT =
"hbase.client.meta.operation.timeout";

View File

@ -54,31 +54,6 @@ public class TestHBaseConfiguration {
UTIL.cleanupTestDir();
}
@Test
public void testGetIntDeprecated() {
int VAL = 1, VAL2 = 2;
String NAME = "foo";
String DEPRECATED_NAME = "foo.deprecated";
// Case 1: only the current name is set -> its value is returned.
Configuration conf = HBaseConfiguration.create();
conf.setInt(NAME, VAL);
assertEquals(VAL, HBaseConfiguration.getInt(conf, NAME, DEPRECATED_NAME, 0));
// Case 2: only the deprecated name is set -> its value is returned.
conf = HBaseConfiguration.create();
conf.setInt(DEPRECATED_NAME, VAL);
assertEquals(VAL, HBaseConfiguration.getInt(conf, NAME, DEPRECATED_NAME, 0));
// Case 3: both names set to the same value -> that value is returned.
conf = HBaseConfiguration.create();
conf.setInt(DEPRECATED_NAME, VAL);
conf.setInt(NAME, VAL);
assertEquals(VAL, HBaseConfiguration.getInt(conf, NAME, DEPRECATED_NAME, 0));
// Case 4: both names set with DIFFERENT values -> the deprecated name wins.
conf = HBaseConfiguration.create();
conf.setInt(DEPRECATED_NAME, VAL);
conf.setInt(NAME, VAL2); // deprecated value will override this
assertEquals(VAL, HBaseConfiguration.getInt(conf, NAME, DEPRECATED_NAME, 0));
}
@Test
public void testSubset() throws Exception {
Configuration conf = HBaseConfiguration.create();

View File

@ -614,12 +614,25 @@ If multiple ZooKeeper instances make up your ZooKeeper ensemble, they may be spe
[[config_timeouts]]
=== Timeout settings
HBase provides many timeout settings to limit the execution time of different remote operations.
HBase provides a wide variety of timeout settings to limit the execution time of various remote operations.
* hbase.rpc.timeout
* hbase.rpc.read.timeout
* hbase.rpc.write.timeout
* hbase.client.operation.timeout
* hbase.client.meta.operation.timeout
* hbase.client.scanner.timeout.period
The `hbase.rpc.timeout` property limits how long a single RPC call can run before timing out.
To fine tune read or write related RPC timeouts set `hbase.rpc.read.timeout` and `hbase.rpc.write.timeout` configuration properties.
In the absence of these properties `hbase.rpc.timeout` will be used.
The `hbase.rpc.timeout` property limits how long an RPC call can run before it times out.
You can also specify a timeout for read and write operations using `hbase.rpc.read.timeout` and `hbase.rpc.write.timeout` configuration properties. In the absence of these properties `hbase.rpc.timeout` will be used.
A higher-level timeout is `hbase.client.operation.timeout`, which applies to each client call as a whole.
Timeout for scan operations is controlled differently. To set it you can use `hbase.client.scanner.timeout.period` property.
When an RPC call fails, for instance because of a timeout governed by `hbase.rpc.timeout`, it will be retried until `hbase.client.operation.timeout` is reached.
The client operation timeout for system tables can be fine-tuned by setting the `hbase.client.meta.operation.timeout` configuration value.
When this is not set, it falls back to the value of `hbase.client.operation.timeout`.
Timeout for scan operations is controlled differently. Use `hbase.client.scanner.timeout.period` property to set this timeout.
[[example_config]]
== Example Configurations

View File

@ -608,7 +608,7 @@ See also Jesse Andersen's link:http://blog.cloudera.com/blog/2014/04/how-to-use-
In some situations clients that fetch data from a RegionServer get a LeaseException instead of the usual <<trouble.client.scantimeout>>.
Usually the source of the exception is `org.apache.hadoop.hbase.regionserver.Leases.removeLease(Leases.java:230)` (line number may vary). It tends to happen in the context of a slow/freezing `RegionServer#next` call.
It can be prevented by having `hbase.rpc.timeout` > `hbase.regionserver.lease.period`.
It can be prevented by having `hbase.rpc.timeout` > `hbase.client.scanner.timeout.period`.
Harsh J investigated the issue as part of the mailing list thread link:https://mail-archives.apache.org/mod_mbox/hbase-user/201209.mbox/%3CCAOcnVr3R-LqtKhFsk8Bhrm-YW2i9O6J6Fhjz2h7q6_sxvwd2yw%40mail.gmail.com%3E[HBase, mail # user - Lease does not exist exceptions]
[[trouble.client.scarylogs]]