HBASE-25687 Backport "HBASE-25681 Add a switch for server/table query… (#3074)

Signed-off-by: stack <stack@apache.org>
Baiqiang Zhao authored on 2021-04-08 02:11:46 +08:00, committed by GitHub
parent 50bd11a2e3
commit 8ff17c68e2
8 changed files with 139 additions and 24 deletions
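
Both switches introduced by this change default to true. As an illustrative fragment (not part of the commit), they could be turned off programmatically with the keys added below; the same property names can also be set in hbase-site.xml:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    Configuration conf = HBaseConfiguration.create();
    // Skip registration of the ServerReadQueryPerSecond / ServerWriteQueryPerSecond meters.
    conf.setBoolean("hbase.regionserver.enable.server.query.meter", false);
    // Skip the per-table read/write query meters.
    conf.setBoolean("hbase.regionserver.enable.table.query.meter", false);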

MetricsTableQueryMeter.java

@@ -25,6 +25,9 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public interface MetricsTableQueryMeter {
 
+  String TABLE_READ_QUERY_PER_SECOND = "tableReadQueryPerSecond";
+  String TABLE_WRITE_QUERY_PER_SECOND = "tableWriteQueryPerSecond";
+
   /**
    * Update table read QPS
    * @param tableName The table the metric is for

MetricsAssertHelper.java

@@ -149,6 +149,16 @@ public interface MetricsAssertHelper {
    */
   boolean checkCounterExists(String name, BaseSource source);
 
+  /**
+   * Check if a gauge exists.
+   *
+   * @param name name of the gauge.
+   * @param source The BaseSource{@link BaseSource} that will provide the tags,
+   *               gauges, and counters.
+   * @return boolean true if gauge metric exists.
+   */
+  boolean checkGaugeExists(String name, BaseSource source);
+
   /**
    * Get the value of a gauge as a double.
    *

MetricsTableQueryMeterImpl.java

@@ -33,9 +33,6 @@ public class MetricsTableQueryMeterImpl implements MetricsTableQueryMeter {
   private final Map<TableName, TableMeters> metersByTable = new ConcurrentHashMap<>();
   private final MetricRegistry metricRegistry;
 
-  private final static String TABLE_READ_QUERY_PER_SECOND = "tableReadQueryPerSecond";
-  private final static String TABLE_WRITE_QUERY_PER_SECOND = "tableWriteQueryPerSecond";
-
   public MetricsTableQueryMeterImpl(MetricRegistry metricRegistry) {
     this.metricRegistry = metricRegistry;
   }

MetricsAssertHelperImpl.java

@@ -211,7 +211,14 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
   public boolean checkCounterExists(String name, BaseSource source) {
     getMetrics(source);
     String cName = canonicalizeMetricName(name);
-    return (counters.get(cName) != null) ? true : false;
+    return counters.get(cName) != null;
+  }
+
+  @Override
+  public boolean checkGaugeExists(String name, BaseSource source) {
+    getMetrics(source);
+    String cName = canonicalizeMetricName(name);
+    return gauges.get(cName) != null;
   }
 
   @Override

MetricsRegionServer.java

@@ -40,6 +40,12 @@ public class MetricsRegionServer {
   public static final String RS_ENABLE_TABLE_METRICS_KEY =
       "hbase.regionserver.enable.table.latencies";
   public static final boolean RS_ENABLE_TABLE_METRICS_DEFAULT = true;
+  public static final String RS_ENABLE_SERVER_QUERY_METER_METRICS_KEY =
+      "hbase.regionserver.enable.server.query.meter";
+  public static final boolean RS_ENABLE_SERVER_QUERY_METER_METRICS_KEY_DEFAULT = true;
+  public static final String RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY =
+      "hbase.regionserver.enable.table.query.meter";
+  public static final boolean RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY_DEFAULT = true;
   public static final String SLOW_METRIC_TIME = "hbase.ipc.slow.metric.time";
 
   private final MetricsRegionServerSource serverSource;
@@ -73,9 +79,12 @@
     bulkLoadTimer = metricRegistry.timer("Bulkload");
     slowMetricTime = conf.getLong(SLOW_METRIC_TIME, DEFAULT_SLOW_METRIC_TIME);
+    quotaSource = CompatibilitySingletonFactory.getInstance(MetricsRegionServerQuotaSource.class);
+    if (conf.getBoolean(RS_ENABLE_SERVER_QUERY_METER_METRICS_KEY,
+      RS_ENABLE_SERVER_QUERY_METER_METRICS_KEY_DEFAULT)) {
       serverReadQueryMeter = metricRegistry.meter("ServerReadQueryPerSecond");
       serverWriteQueryMeter = metricRegistry.meter("ServerWriteQueryPerSecond");
-    quotaSource = CompatibilitySingletonFactory.getInstance(MetricsRegionServerQuotaSource.class);
+    }
   }
 
   MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper,
@@ -93,7 +102,9 @@
    */
   static RegionServerTableMetrics createTableMetrics(Configuration conf) {
     if (conf.getBoolean(RS_ENABLE_TABLE_METRICS_KEY, RS_ENABLE_TABLE_METRICS_DEFAULT)) {
-      return new RegionServerTableMetrics();
+      return new RegionServerTableMetrics(
+        conf.getBoolean(RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY,
+          RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY_DEFAULT));
     }
     return null;
   }
@@ -270,21 +281,27 @@
     if (tableMetrics != null && tn != null) {
       tableMetrics.updateTableReadQueryMeter(tn, count);
     }
-    this.serverReadQueryMeter.mark(count);
+    if (serverReadQueryMeter != null) {
+      serverReadQueryMeter.mark(count);
+    }
   }
 
   public void updateWriteQueryMeter(TableName tn, long count) {
     if (tableMetrics != null && tn != null) {
       tableMetrics.updateTableWriteQueryMeter(tn, count);
     }
-    this.serverWriteQueryMeter.mark(count);
+    if (serverWriteQueryMeter != null) {
+      serverWriteQueryMeter.mark(count);
+    }
   }
 
   public void updateWriteQueryMeter(TableName tn) {
     if (tableMetrics != null && tn != null) {
       tableMetrics.updateTableWriteQueryMeter(tn);
     }
-    this.serverWriteQueryMeter.mark();
+    if (serverWriteQueryMeter != null) {
+      serverWriteQueryMeter.mark();
+    }
   }
 
   /**

RegionServerTableMetrics.java

@@ -29,13 +29,15 @@ import org.apache.yetus.audience.InterfaceAudience;
 public class RegionServerTableMetrics {
 
   private final MetricsTableLatencies latencies;
-  private final MetricsTableQueryMeter queryMeter;
+  private MetricsTableQueryMeter queryMeter;
 
-  public RegionServerTableMetrics() {
+  public RegionServerTableMetrics(boolean enableTableQueryMeter) {
     latencies = CompatibilitySingletonFactory.getInstance(MetricsTableLatencies.class);
+    if (enableTableQueryMeter) {
       queryMeter = new MetricsTableQueryMeterImpl(MetricRegistries.global().
         get(((MetricsTableLatenciesImpl) latencies).getMetricRegistryInfo()).get());
     }
+  }
 
   public void updatePut(TableName table, long time) {
     latencies.updatePut(table.getNameAsString(), time);
@@ -86,18 +88,20 @@ public class RegionServerTableMetrics {
   }
 
   public void updateTableReadQueryMeter(TableName table, long count) {
-    queryMeter.updateTableReadQueryMeter(table, count);
-  }
-
-  public void updateTableReadQueryMeter(TableName table) {
-    queryMeter.updateTableReadQueryMeter(table);
+    if (queryMeter != null) {
+      queryMeter.updateTableReadQueryMeter(table, count);
+    }
   }
 
   public void updateTableWriteQueryMeter(TableName table, long count) {
-    queryMeter.updateTableWriteQueryMeter(table, count);
+    if (queryMeter != null) {
+      queryMeter.updateTableWriteQueryMeter(table, count);
+    }
   }
 
   public void updateTableWriteQueryMeter(TableName table) {
-    queryMeter.updateTableWriteQueryMeter(table);
+    if (queryMeter != null) {
+      queryMeter.updateTableWriteQueryMeter(table);
+    }
   }
 }
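
For reference, an illustrative fragment (not part of the commit): with the constructor flag off, queryMeter is never created, so the table-level update calls below simply do nothing, while the latency metrics kept by the same class are unaffected. The table name is arbitrary.

    RegionServerTableMetrics tableMetrics = new RegionServerTableMetrics(false);
    // queryMeter is null here, so these calls return without touching any meter.
    tableMetrics.updateTableReadQueryMeter(TableName.valueOf("t1"), 10);
    tableMetrics.updateTableWriteQueryMeter(TableName.valueOf("t1"), 10);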

TestMetricsRegionServer.java

@@ -17,11 +17,14 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CompatibilityFactory;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.test.MetricsAssertHelper;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -57,7 +60,9 @@ public class TestMetricsRegionServer {
   @Before
   public void setUp() {
     wrapper = new MetricsRegionServerWrapperStub();
-    rsm = new MetricsRegionServer(wrapper, new Configuration(false), null);
+    Configuration conf = new Configuration(false);
+    conf.setBoolean(MetricsRegionServer.RS_ENABLE_SERVER_QUERY_METER_METRICS_KEY, false);
+    rsm = new MetricsRegionServer(wrapper, conf, null);
     serverSource = rsm.getMetricsSource();
   }
 
@@ -245,5 +250,26 @@ public class TestMetricsRegionServer {
     HELPER.assertCounter("pauseTimeWithGc_num_ops", 1, serverSource);
   }
 
+  @Test
+  public void testServerQueryMeterSwitch() {
+    TableName tn1 = TableName.valueOf("table1");
+    // the server query meter has been disabled in setUp()
+    rsm.updateReadQueryMeter(tn1, 500L);
+    assertFalse(HELPER.checkGaugeExists("ServerReadQueryPerSecond_count", serverSource));
+    rsm.updateWriteQueryMeter(tn1, 500L);
+    assertFalse(HELPER.checkGaugeExists("ServerWriteQueryPerSecond_count", serverSource));
+
+    // enable
+    Configuration conf = new Configuration(false);
+    conf.setBoolean(MetricsRegionServer.RS_ENABLE_SERVER_QUERY_METER_METRICS_KEY, true);
+    rsm = new MetricsRegionServer(wrapper, conf, null);
+    serverSource = rsm.getMetricsSource();
+    rsm.updateReadQueryMeter(tn1, 500L);
+    assertTrue(HELPER.checkGaugeExists("ServerReadQueryPerSecond_count", serverSource));
+    HELPER.assertGauge("ServerReadQueryPerSecond_count", 500L, serverSource);
+    rsm.updateWriteQueryMeter(tn1, 500L);
+    assertTrue(HELPER.checkGaugeExists("ServerWriteQueryPerSecond_count", serverSource));
+    HELPER.assertGauge("ServerWriteQueryPerSecond_count", 500L, serverSource);
+  }
 }

TestMetricsTableLatencies.java

@@ -17,10 +17,12 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CompatibilityFactory;
 import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -51,7 +53,7 @@ public class TestMetricsTableLatencies {
     assertTrue("'latencies' is actually " + latencies.getClass(),
       latencies instanceof MetricsTableLatenciesImpl);
     MetricsTableLatenciesImpl latenciesImpl = (MetricsTableLatenciesImpl) latencies;
-    RegionServerTableMetrics tableMetrics = new RegionServerTableMetrics();
+    RegionServerTableMetrics tableMetrics = new RegionServerTableMetrics(false);
 
     // Metrics to each table should be disjoint
     // N.B. each call to assertGauge removes all previously acquired metrics so we have to
@@ -72,4 +74,53 @@ public class TestMetricsTableLatencies {
     HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName(
       tn2, MetricsTableLatencies.PUT_TIME + "_" + "99th_percentile"), 75L, latenciesImpl);
   }
+
+  @Test
+  public void testTableQueryMeterSwitch() {
+    TableName tn1 = TableName.valueOf("table1");
+    MetricsTableLatencies latencies = CompatibilitySingletonFactory.getInstance(
+      MetricsTableLatencies.class);
+    assertTrue("'latencies' is actually " + latencies.getClass(),
+      latencies instanceof MetricsTableLatenciesImpl);
+    MetricsTableLatenciesImpl latenciesImpl = (MetricsTableLatenciesImpl) latencies;
+    Configuration conf = new Configuration();
+    conf.setBoolean(MetricsRegionServer.RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY, false);
+    boolean enableTableQueryMeter = conf.getBoolean(
+      MetricsRegionServer.RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY,
+      MetricsRegionServer.RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY_DEFAULT);
+
+    // disable
+    assertFalse(enableTableQueryMeter);
+    RegionServerTableMetrics tableMetrics = new RegionServerTableMetrics(enableTableQueryMeter);
+    tableMetrics.updateTableReadQueryMeter(tn1, 500L);
+    assertFalse(HELPER.checkGaugeExists(MetricsTableLatenciesImpl.qualifyMetricsName(
+      tn1, MetricsTableQueryMeterImpl.TABLE_READ_QUERY_PER_SECOND + "_" + "count"),
+      latenciesImpl));
+    tableMetrics.updateTableWriteQueryMeter(tn1, 500L);
+    assertFalse(HELPER.checkGaugeExists(MetricsTableLatenciesImpl.qualifyMetricsName(
+      tn1, MetricsTableQueryMeterImpl.TABLE_WRITE_QUERY_PER_SECOND + "_" + "count"),
+      latenciesImpl));
+
+    // enable
+    conf.setBoolean(MetricsRegionServer.RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY, true);
+    enableTableQueryMeter = conf.getBoolean(
+      MetricsRegionServer.RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY,
+      MetricsRegionServer.RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY_DEFAULT);
+    assertTrue(enableTableQueryMeter);
+    tableMetrics = new RegionServerTableMetrics(true);
+    tableMetrics.updateTableReadQueryMeter(tn1, 500L);
+    assertTrue(HELPER.checkGaugeExists(MetricsTableLatenciesImpl.qualifyMetricsName(
+      tn1, MetricsTableQueryMeterImpl.TABLE_READ_QUERY_PER_SECOND + "_" + "count"),
+      latenciesImpl));
+    HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName(
+      tn1, MetricsTableQueryMeterImpl.TABLE_READ_QUERY_PER_SECOND + "_" + "count"),
+      500L, latenciesImpl);
+    tableMetrics.updateTableWriteQueryMeter(tn1, 500L);
+    assertTrue(HELPER.checkGaugeExists(MetricsTableLatenciesImpl.qualifyMetricsName(
+      tn1, MetricsTableQueryMeterImpl.TABLE_WRITE_QUERY_PER_SECOND + "_" + "count"),
+      latenciesImpl));
+    HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName(
+      tn1, MetricsTableQueryMeterImpl.TABLE_WRITE_QUERY_PER_SECOND + "_" + "count"),
+      500L, latenciesImpl);
+  }
 }