HBASE-27681 Refactor Table Latency Metrics (#5072)
Signed-off-by: Bryan Beaudreault <bbeaudreault@apache.org>
This commit is contained in:
parent 586073d0c0
commit 16864c705c
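The refactor below replaces the RegionServer-wide RegionServerTableMetrics / MetricsTableQueryMeter pair with a per-table MetricsTableRequests registry that each HRegion creates on open and removes on close. As a minimal, hypothetical sketch (not part of this commit) of how the new class could be exercised on its own, assuming hbase-server is on the classpath; the wrapper class name, table name, and latency values are made up, while the MetricsTableRequests API and config key come from the diff below:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.metrics.MetricsTableRequests;

public class MetricsTableRequestsSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Table latency histograms default to on; the per-table query meter is opt-in.
    conf.setBoolean(MetricsTableRequests.ENABLE_TABLE_QUERY_METER_METRICS_KEY, true);

    MetricsTableRequests requests =
      new MetricsTableRequests(TableName.valueOf("exampleTable"), conf);
    requests.updateGet(12L);                // record a 12 ms Get latency into the getTime histogram
    requests.updateTableReadQueryMeter(1L); // mark one read query on tableReadQueryPerSecond
    requests.removeRegistry();              // drop the per-table registry, as HRegion does on close
  }
}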
@@ -115,6 +115,7 @@ public final class GlobalMetricRegistriesAdapter {
    for (MetricRegistry registry : registries) {
      MetricRegistryInfo info = registry.getMetricRegistryInfo();

      LOG.trace("MetricRegistryInfo : " + info.getMetricsName());
      if (info.isExistingSource()) {
        // If there is an already existing BaseSource for this MetricRegistry, skip it here. These
        // types of registries are there only due to existing BaseSource implementations in the
@@ -1,57 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.hbase.TableName;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Query Per Second for each table in a RegionServer.
 */
@InterfaceAudience.Private
public interface MetricsTableQueryMeter {

  String TABLE_READ_QUERY_PER_SECOND = "tableReadQueryPerSecond";
  String TABLE_WRITE_QUERY_PER_SECOND = "tableWriteQueryPerSecond";

  /**
   * Update table read QPS
   * @param tableName The table the metric is for
   * @param count Number of occurrences to record
   */
  void updateTableReadQueryMeter(TableName tableName, long count);

  /**
   * Update table read QPS
   * @param tableName The table the metric is for
   */
  void updateTableReadQueryMeter(TableName tableName);

  /**
   * Update table write QPS
   * @param tableName The table the metric is for
   * @param count Number of occurrences to record
   */
  void updateTableWriteQueryMeter(TableName tableName, long count);

  /**
   * Update table write QPS
   * @param tableName The table the metric is for
   */
  void updateTableWriteQueryMeter(TableName tableName);
}
@@ -1,99 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.metrics.Meter;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Implementation of {@link MetricsTableQueryMeter} to track query per second for each table in a
 * RegionServer.
 */
@InterfaceAudience.Private
public class MetricsTableQueryMeterImpl implements MetricsTableQueryMeter {
  private final Map<TableName, TableMeters> metersByTable = new ConcurrentHashMap<>();
  private final MetricRegistry metricRegistry;

  public MetricsTableQueryMeterImpl(MetricRegistry metricRegistry) {
    this.metricRegistry = metricRegistry;
  }

  private static class TableMeters {
    final Meter tableReadQueryMeter;
    final Meter tableWriteQueryMeter;

    TableMeters(MetricRegistry metricRegistry, TableName tableName) {
      this.tableReadQueryMeter =
        metricRegistry.meter(qualifyMetricsName(tableName, TABLE_READ_QUERY_PER_SECOND));
      this.tableWriteQueryMeter =
        metricRegistry.meter(qualifyMetricsName(tableName, TABLE_WRITE_QUERY_PER_SECOND));
    }

    public void updateTableReadQueryMeter(long count) {
      tableReadQueryMeter.mark(count);
    }

    public void updateTableReadQueryMeter() {
      tableReadQueryMeter.mark();
    }

    public void updateTableWriteQueryMeter(long count) {
      tableWriteQueryMeter.mark(count);
    }

    public void updateTableWriteQueryMeter() {
      tableWriteQueryMeter.mark();
    }
  }

  private static String qualifyMetricsName(TableName tableName, String metric) {
    StringBuilder sb = new StringBuilder();
    sb.append("Namespace_").append(tableName.getNamespaceAsString());
    sb.append("_table_").append(tableName.getQualifierAsString());
    sb.append("_metric_").append(metric);
    return sb.toString();
  }

  private TableMeters getOrCreateTableMeter(TableName tableName) {
    return metersByTable.computeIfAbsent(tableName, tbn -> new TableMeters(metricRegistry, tbn));
  }

  @Override
  public void updateTableReadQueryMeter(TableName tableName, long count) {
    getOrCreateTableMeter(tableName).updateTableReadQueryMeter(count);
  }

  @Override
  public void updateTableReadQueryMeter(TableName tableName) {
    getOrCreateTableMeter(tableName).updateTableReadQueryMeter();
  }

  @Override
  public void updateTableWriteQueryMeter(TableName tableName, long count) {
    getOrCreateTableMeter(tableName).updateTableWriteQueryMeter(count);
  }

  @Override
  public void updateTableWriteQueryMeter(TableName tableName) {
    getOrCreateTableMeter(tableName).updateTableWriteQueryMeter();
  }
}
@@ -0,0 +1,62 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.metrics.impl;

import static org.junit.Assert.assertTrue;

import java.util.Optional;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.metrics.MetricRegistries;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.metrics.MetricRegistryInfo;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Test class for {@link MetricRegistries}.
 */
@Category(SmallTests.class)
public class TestMetricRegistriesImpl {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestMetricRegistriesImpl.class);

  @Test
  public void testMetricsRegistriesRemoveRef() {
    MetricRegistryInfo registryInfo =
      new MetricRegistryInfo("testMetrics", null, null, null, false);
    MetricRegistries.global().create(registryInfo);
    Optional<MetricRegistry> registry1 = MetricRegistries.global().get(registryInfo);
    assertTrue(registry1.isPresent());

    MetricRegistries.global().create(registryInfo);
    Optional<MetricRegistry> registry2 = MetricRegistries.global().get(registryInfo);
    assertTrue(registry2.isPresent());

    MetricRegistries.global().remove(registryInfo);
    Optional<MetricRegistry> registry3 = MetricRegistries.global().get(registryInfo);
    assertTrue(registry3.isPresent());

    MetricRegistries.global().remove(registryInfo);
    Optional<MetricRegistry> registry4 = MetricRegistries.global().get(registryInfo);
    assertTrue(!registry4.isPresent());
  }
}
@@ -148,6 +148,7 @@ import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.Write
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.compactions.ForbidMajorCompactionChecker;
import org.apache.hadoop.hbase.regionserver.metrics.MetricsTableRequests;
import org.apache.hadoop.hbase.regionserver.regionreplication.RegionReplicationSink;
import org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory;
import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
@@ -373,6 +374,13 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    isRestoredRegion = restoredRegion;
  }

  public MetricsTableRequests getMetricsTableRequests() {
    return metricsTableRequests;
  }

  // Handle table latency metrics
  private MetricsTableRequests metricsTableRequests;

  // The internal wait duration to acquire a lock before read/update
  // from the region. It is not per row. The purpose of this wait time
  // is to avoid waiting a long time while the region is busy, so that
@@ -962,6 +970,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
        }

      }
      if (metricsTableRequests != null) {
        metricsTableRequests.removeRegistry();
      }
      throw e;
    } finally {
      // nextSeqid will be -1 if the initialization fails.
@@ -1091,6 +1102,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
      status.setStatus("Running coprocessor post-open hooks");
      coprocessorHost.postOpen();
    }

    metricsTableRequests = new MetricsTableRequests(htableDescriptor.getTableName(), conf);

    status.markComplete("Region opened successfully");
    return nextSeqId;
  }
@@ -1875,6 +1889,13 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
        writeRegionCloseMarker(wal);
      }
      this.closed.set(true);

      // Decrease refCount of table latency metric registry.
      // Do this after closed#set to make sure only -1.
      if (metricsTableRequests != null) {
        metricsTableRequests.removeRegistry();
      }

      if (!canFlush) {
        decrMemStoreSize(this.memStoreSizing.getMemStoreSize());
      } else if (this.memStoreSizing.getDataSize() != 0) {
@@ -4691,8 +4712,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
        }
      } finally {
        if (rsServices != null && rsServices.getMetrics() != null) {
          rsServices.getMetrics().updateWriteQueryMeter(this.htableDescriptor.getTableName(),
            batchOp.size());
          rsServices.getMetrics().updateWriteQueryMeter(this, batchOp.size());
        }
        batchOp.closeRegionOperation();
      }
@@ -7885,7 +7905,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
        this.metricsRegion.updateGet(EnvironmentEdgeManager.currentTime() - before);
      }
      if (this.rsServices != null && this.rsServices.getMetrics() != null) {
        rsServices.getMetrics().updateReadQueryMeter(getRegionInfo().getTable(), 1);
        rsServices.getMetrics().updateReadQueryMeter(this, 1);
      }

    }
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.metrics.Meter;
import org.apache.hadoop.hbase.metrics.MetricRegistries;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
@@ -36,20 +35,12 @@ import org.apache.yetus.audience.InterfaceStability;
@InterfaceStability.Evolving
@InterfaceAudience.Private
public class MetricsRegionServer {
  public static final String RS_ENABLE_TABLE_METRICS_KEY =
    "hbase.regionserver.enable.table.latencies";
  public static final boolean RS_ENABLE_TABLE_METRICS_DEFAULT = true;
  public static final String RS_ENABLE_SERVER_QUERY_METER_METRICS_KEY =
    "hbase.regionserver.enable.server.query.meter";
  public static final boolean RS_ENABLE_SERVER_QUERY_METER_METRICS_KEY_DEFAULT = false;
  public static final String RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY =
    "hbase.regionserver.enable.table.query.meter";
  public static final boolean RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY_DEFAULT = false;

  public static final String SLOW_METRIC_TIME = "hbase.ipc.slow.metric.time";
  private final MetricsRegionServerSource serverSource;
  private final MetricsRegionServerWrapper regionServerWrapper;
  private RegionServerTableMetrics tableMetrics;
  private final MetricsTable metricsTable;
  private MetricsRegionServerQuotaSource quotaSource;
  private final MetricsUserAggregate userAggregate;
@@ -68,8 +59,7 @@ public class MetricsRegionServer {
    this(regionServerWrapper,
      CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)
        .createServer(regionServerWrapper),
      createTableMetrics(conf), metricsTable,
      MetricsUserAggregateFactory.getMetricsUserAggregate(conf));
      metricsTable, MetricsUserAggregateFactory.getMetricsUserAggregate(conf));

    // Create hbase-metrics module based metrics. The registry should already be registered by the
    // MetricsRegionServerSource
@@ -90,26 +80,14 @@ public class MetricsRegionServer {
  }

  MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper,
    MetricsRegionServerSource serverSource, RegionServerTableMetrics tableMetrics,
    MetricsTable metricsTable, MetricsUserAggregate userAggregate) {
    MetricsRegionServerSource serverSource, MetricsTable metricsTable,
    MetricsUserAggregate userAggregate) {
    this.regionServerWrapper = regionServerWrapper;
    this.serverSource = serverSource;
    this.tableMetrics = tableMetrics;
    this.metricsTable = metricsTable;
    this.userAggregate = userAggregate;
  }

  /**
   * Creates an instance of {@link RegionServerTableMetrics} only if the feature is enabled.
   */
  static RegionServerTableMetrics createTableMetrics(Configuration conf) {
    if (conf.getBoolean(RS_ENABLE_TABLE_METRICS_KEY, RS_ENABLE_TABLE_METRICS_DEFAULT)) {
      return new RegionServerTableMetrics(conf.getBoolean(RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY,
        RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY_DEFAULT));
    }
    return null;
  }

  public MetricsRegionServerSource getMetricsSource() {
    return serverSource;
  }
@@ -122,16 +100,16 @@ public class MetricsRegionServer {
    return regionServerWrapper;
  }

  public void updatePutBatch(TableName tn, long t) {
    if (tableMetrics != null && tn != null) {
      tableMetrics.updatePutBatch(tn, t);
  public void updatePutBatch(HRegion region, long t) {
    if (region.getMetricsTableRequests() != null) {
      region.getMetricsTableRequests().updatePutBatch(t);
    }
    serverSource.updatePutBatch(t);
  }

  public void updatePut(TableName tn, long t) {
    if (tableMetrics != null && tn != null) {
      tableMetrics.updatePut(tn, t);
  public void updatePut(HRegion region, long t) {
    if (region.getMetricsTableRequests() != null) {
      region.getMetricsTableRequests().updatePut(t);
    }
    if (t > slowMetricTime) {
      serverSource.incrSlowPut();
@@ -140,9 +118,9 @@ public class MetricsRegionServer {
    userAggregate.updatePut(t);
  }

  public void updateDelete(TableName tn, long t) {
    if (tableMetrics != null && tn != null) {
      tableMetrics.updateDelete(tn, t);
  public void updateDelete(HRegion region, long t) {
    if (region.getMetricsTableRequests() != null) {
      region.getMetricsTableRequests().updateDelete(t);
    }
    if (t > slowMetricTime) {
      serverSource.incrSlowDelete();
@@ -151,37 +129,37 @@ public class MetricsRegionServer {
    userAggregate.updateDelete(t);
  }

  public void updateDeleteBatch(TableName tn, long t) {
    if (tableMetrics != null && tn != null) {
      tableMetrics.updateDeleteBatch(tn, t);
  public void updateDeleteBatch(HRegion region, long t) {
    if (region.getMetricsTableRequests() != null) {
      region.getMetricsTableRequests().updateDeleteBatch(t);
    }
    serverSource.updateDeleteBatch(t);
  }

  public void updateCheckAndDelete(TableName tn, long t) {
    if (tableMetrics != null && tn != null) {
      tableMetrics.updateCheckAndDelete(tn, t);
  public void updateCheckAndDelete(HRegion region, long t) {
    if (region.getMetricsTableRequests() != null) {
      region.getMetricsTableRequests().updateCheckAndDelete(t);
    }
    serverSource.updateCheckAndDelete(t);
  }

  public void updateCheckAndPut(TableName tn, long t) {
    if (tableMetrics != null && tn != null) {
      tableMetrics.updateCheckAndPut(tn, t);
  public void updateCheckAndPut(HRegion region, long t) {
    if (region.getMetricsTableRequests() != null) {
      region.getMetricsTableRequests().updateCheckAndPut(t);
    }
    serverSource.updateCheckAndPut(t);
  }

  public void updateCheckAndMutate(TableName tn, long t) {
    if (tableMetrics != null && tn != null) {
      tableMetrics.updateCheckAndMutate(tn, t);
  public void updateCheckAndMutate(HRegion region, long t) {
    if (region.getMetricsTableRequests() != null) {
      region.getMetricsTableRequests().updateCheckAndMutate(t);
    }
    serverSource.updateCheckAndMutate(t);
  }

  public void updateGet(TableName tn, long t) {
    if (tableMetrics != null && tn != null) {
      tableMetrics.updateGet(tn, t);
  public void updateGet(HRegion region, long t) {
    if (region.getMetricsTableRequests() != null) {
      region.getMetricsTableRequests().updateGet(t);
    }
    if (t > slowMetricTime) {
      serverSource.incrSlowGet();
@@ -190,9 +168,9 @@ public class MetricsRegionServer {
    userAggregate.updateGet(t);
  }

  public void updateIncrement(TableName tn, long t) {
    if (tableMetrics != null && tn != null) {
      tableMetrics.updateIncrement(tn, t);
  public void updateIncrement(HRegion region, long t) {
    if (region.getMetricsTableRequests() != null) {
      region.getMetricsTableRequests().updateIncrement(t);
    }
    if (t > slowMetricTime) {
      serverSource.incrSlowIncrement();
@@ -201,9 +179,9 @@ public class MetricsRegionServer {
    userAggregate.updateIncrement(t);
  }

  public void updateAppend(TableName tn, long t) {
    if (tableMetrics != null && tn != null) {
      tableMetrics.updateAppend(tn, t);
  public void updateAppend(HRegion region, long t) {
    if (region.getMetricsTableRequests() != null) {
      region.getMetricsTableRequests().updateAppend(t);
    }
    if (t > slowMetricTime) {
      serverSource.incrSlowAppend();
@@ -217,16 +195,16 @@ public class MetricsRegionServer {
    userAggregate.updateReplay(t);
  }

  public void updateScanSize(TableName tn, long scanSize) {
    if (tableMetrics != null && tn != null) {
      tableMetrics.updateScanSize(tn, scanSize);
  public void updateScanSize(HRegion region, long scanSize) {
    if (region.getMetricsTableRequests() != null) {
      region.getMetricsTableRequests().updateScanSize(scanSize);
    }
    serverSource.updateScanSize(scanSize);
  }

  public void updateScanTime(TableName tn, long t) {
    if (tableMetrics != null && tn != null) {
      tableMetrics.updateScanTime(tn, t);
  public void updateScanTime(HRegion region, long t) {
    if (region.getMetricsTableRequests() != null) {
      region.getMetricsTableRequests().updateScanTime(t);
    }
    serverSource.updateScanTime(t);
    userAggregate.updateScanTime(t);
@@ -292,27 +270,27 @@ public class MetricsRegionServer {
    quotaSource.incrementRegionSizeReportingChoreTime(time);
  }

  public void updateReadQueryMeter(TableName tn, long count) {
    if (tableMetrics != null && tn != null) {
      tableMetrics.updateTableReadQueryMeter(tn, count);
  public void updateReadQueryMeter(HRegion region, long count) {
    if (region.getMetricsTableRequests() != null) {
      region.getMetricsTableRequests().updateTableReadQueryMeter(count);
    }
    if (serverReadQueryMeter != null) {
      serverReadQueryMeter.mark(count);
    }
  }

  public void updateWriteQueryMeter(TableName tn, long count) {
    if (tableMetrics != null && tn != null) {
      tableMetrics.updateTableWriteQueryMeter(tn, count);
  public void updateWriteQueryMeter(HRegion region, long count) {
    if (region.getMetricsTableRequests() != null) {
      region.getMetricsTableRequests().updateTableWriteQueryMeter(count);
    }
    if (serverWriteQueryMeter != null) {
      serverWriteQueryMeter.mark(count);
    }
  }

  public void updateWriteQueryMeter(TableName tn) {
    if (tableMetrics != null && tn != null) {
      tableMetrics.updateTableWriteQueryMeter(tn);
  public void updateWriteQueryMeter(HRegion region) {
    if (region.getMetricsTableRequests() != null) {
      region.getMetricsTableRequests().updateTableWriteQueryMeter();
    }
    if (serverWriteQueryMeter != null) {
      serverWriteQueryMeter.mark();
@@ -659,8 +659,7 @@ public class RSRpcServices extends HBaseRpcServicesBase<HRegionServer>
      long nonce = mutation.hasNonce() ? mutation.getNonce() : HConstants.NO_NONCE;
      Result r = region.append(append, nonceGroup, nonce);
      if (server.getMetrics() != null) {
        server.getMetrics().updateAppend(region.getTableDescriptor().getTableName(),
          EnvironmentEdgeManager.currentTime() - before);
        server.getMetrics().updateAppend(region, EnvironmentEdgeManager.currentTime() - before);
      }
      return r == null ? Result.EMPTY_RESULT : r;
    }
@@ -680,8 +679,7 @@ public class RSRpcServices extends HBaseRpcServicesBase<HRegionServer>
      Result r = region.increment(increment, nonceGroup, nonce);
      final MetricsRegionServer metricsRegionServer = server.getMetrics();
      if (metricsRegionServer != null) {
        metricsRegionServer.updateIncrement(region.getTableDescriptor().getTableName(),
          EnvironmentEdgeManager.currentTime() - before);
        metricsRegionServer.updateIncrement(region, EnvironmentEdgeManager.currentTime() - before);
      }
      return r == null ? Result.EMPTY_RESULT : r;
    }
@@ -773,8 +771,7 @@ public class RSRpcServices extends HBaseRpcServicesBase<HRegionServer>
        } finally {
          final MetricsRegionServer metricsRegionServer = server.getMetrics();
          if (metricsRegionServer != null) {
            metricsRegionServer.updateGet(region.getTableDescriptor().getTableName(),
              EnvironmentEdgeManager.currentTime() - before);
            metricsRegionServer.updateGet(region, EnvironmentEdgeManager.currentTime() - before);
          }
        }
      } else if (action.hasServiceCall()) {
@@ -1063,12 +1060,10 @@ public class RSRpcServices extends HBaseRpcServicesBase<HRegionServer>
      if (metricsRegionServer != null) {
        long after = EnvironmentEdgeManager.currentTime();
        if (batchContainsPuts) {
          metricsRegionServer.updatePutBatch(region.getTableDescriptor().getTableName(),
            after - starttime);
          metricsRegionServer.updatePutBatch(region, after - starttime);
        }
        if (batchContainsDelete) {
          metricsRegionServer.updateDeleteBatch(region.getTableDescriptor().getTableName(),
            after - starttime);
          metricsRegionServer.updateDeleteBatch(region, after - starttime);
        }
      }
    }
@@ -2503,8 +2498,7 @@ public class RSRpcServices extends HBaseRpcServicesBase<HRegionServer>
      if (metricsRegionServer != null) {
        TableDescriptor td = region != null ? region.getTableDescriptor() : null;
        if (td != null) {
          metricsRegionServer.updateGet(td.getTableName(),
            EnvironmentEdgeManager.currentTime() - before);
          metricsRegionServer.updateGet(region, EnvironmentEdgeManager.currentTime() - before);
        }
      }
      if (quota != null) {
@@ -2985,7 +2979,7 @@ public class RSRpcServices extends HBaseRpcServicesBase<HRegionServer>
      MetricsRegionServer metricsRegionServer = server.getMetrics();
      if (metricsRegionServer != null) {
        long after = EnvironmentEdgeManager.currentTime();
        metricsRegionServer.updatePut(region.getRegionInfo().getTable(), after - before);
        metricsRegionServer.updatePut(region, after - before);
      }
    }

@@ -3001,7 +2995,7 @@ public class RSRpcServices extends HBaseRpcServicesBase<HRegionServer>
      MetricsRegionServer metricsRegionServer = server.getMetrics();
      if (metricsRegionServer != null) {
        long after = EnvironmentEdgeManager.currentTime();
        metricsRegionServer.updateDelete(region.getRegionInfo().getTable(), after - before);
        metricsRegionServer.updateDelete(region, after - before);
      }
    }

@@ -3028,16 +3022,15 @@ public class RSRpcServices extends HBaseRpcServicesBase<HRegionServer>
      MetricsRegionServer metricsRegionServer = server.getMetrics();
      if (metricsRegionServer != null) {
        long after = EnvironmentEdgeManager.currentTime();
        metricsRegionServer.updateCheckAndMutate(region.getRegionInfo().getTable(), after - before);
        metricsRegionServer.updateCheckAndMutate(region, after - before);

        MutationType type = mutation.getMutateType();
        switch (type) {
          case PUT:
            metricsRegionServer.updateCheckAndPut(region.getRegionInfo().getTable(), after - before);
            metricsRegionServer.updateCheckAndPut(region, after - before);
            break;
          case DELETE:
            metricsRegionServer.updateCheckAndDelete(region.getRegionInfo().getTable(),
              after - before);
            metricsRegionServer.updateCheckAndDelete(region, after - before);
            break;
          default:
            break;
@@ -3459,12 +3452,9 @@ public class RSRpcServices extends HBaseRpcServicesBase<HRegionServer>
        region.getMetrics().updateScanTime(end - before);
        final MetricsRegionServer metricsRegionServer = server.getMetrics();
        if (metricsRegionServer != null) {
          metricsRegionServer.updateScanSize(region.getTableDescriptor().getTableName(),
            responseCellSize);
          metricsRegionServer.updateScanTime(region.getTableDescriptor().getTableName(),
            end - before);
          metricsRegionServer.updateReadQueryMeter(region.getRegionInfo().getTable(),
            numOfNextRawCalls);
          metricsRegionServer.updateScanSize(region, responseCellSize);
          metricsRegionServer.updateScanTime(region, end - before);
          metricsRegionServer.updateReadQueryMeter(region, numOfNextRawCalls);
        }
      }
      // coprocessor postNext hook
@@ -1,108 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.metrics.MetricRegistries;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Captures operation metrics by table. Separates metrics collection for table metrics away from
 * {@link MetricsRegionServer} for encapsulation and ease of testing.
 */
@InterfaceAudience.Private
public class RegionServerTableMetrics {

  private final MetricsTableLatencies latencies;
  private MetricsTableQueryMeter queryMeter;

  public RegionServerTableMetrics(boolean enableTableQueryMeter) {
    latencies = CompatibilitySingletonFactory.getInstance(MetricsTableLatencies.class);
    if (enableTableQueryMeter) {
      queryMeter = new MetricsTableQueryMeterImpl(MetricRegistries.global()
        .get(((MetricsTableLatenciesImpl) latencies).getMetricRegistryInfo()).get());
    }
  }

  public void updatePut(TableName table, long time) {
    latencies.updatePut(table.getNameAsString(), time);
  }

  public void updatePutBatch(TableName table, long time) {
    latencies.updatePutBatch(table.getNameAsString(), time);
  }

  public void updateGet(TableName table, long time) {
    latencies.updateGet(table.getNameAsString(), time);
  }

  public void updateIncrement(TableName table, long time) {
    latencies.updateIncrement(table.getNameAsString(), time);
  }

  public void updateAppend(TableName table, long time) {
    latencies.updateAppend(table.getNameAsString(), time);
  }

  public void updateDelete(TableName table, long time) {
    latencies.updateDelete(table.getNameAsString(), time);
  }

  public void updateDeleteBatch(TableName table, long time) {
    latencies.updateDeleteBatch(table.getNameAsString(), time);
  }

  public void updateCheckAndDelete(TableName table, long time) {
    latencies.updateCheckAndDelete(table.getNameAsString(), time);
  }

  public void updateCheckAndPut(TableName table, long time) {
    latencies.updateCheckAndPut(table.getNameAsString(), time);
  }

  public void updateCheckAndMutate(TableName table, long time) {
    latencies.updateCheckAndMutate(table.getNameAsString(), time);
  }

  public void updateScanTime(TableName table, long time) {
    latencies.updateScanTime(table.getNameAsString(), time);
  }

  public void updateScanSize(TableName table, long size) {
    latencies.updateScanSize(table.getNameAsString(), size);
  }

  public void updateTableReadQueryMeter(TableName table, long count) {
    if (queryMeter != null) {
      queryMeter.updateTableReadQueryMeter(table, count);
    }
  }

  public void updateTableWriteQueryMeter(TableName table, long count) {
    if (queryMeter != null) {
      queryMeter.updateTableWriteQueryMeter(table, count);
    }
  }

  public void updateTableWriteQueryMeter(TableName table) {
    if (queryMeter != null) {
      queryMeter.updateTableWriteQueryMeter(table);
    }
  }
}
@@ -0,0 +1,331 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.metrics;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.metrics.Histogram;
import org.apache.hadoop.hbase.metrics.Meter;
import org.apache.hadoop.hbase.metrics.MetricRegistries;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.metrics.MetricRegistryInfo;
import org.apache.yetus.audience.InterfaceAudience;

@InterfaceAudience.Private
public class MetricsTableRequests {

  public static final String ENABLE_TABLE_LATENCIES_METRICS_KEY =
    "hbase.regionserver.enable.table.latencies";

  public static final boolean ENABLE_TABLE_LATENCIES_METRICS_DEFAULT = true;

  public static final String ENABLE_TABLE_QUERY_METER_METRICS_KEY =
    "hbase.regionserver.enable.table.query.meter";

  public static final boolean ENABLE_TABLE_QUERY_METER_METRICS_KEY_DEFAULT = false;

  /**
   * The name of the metrics
   */
  private final static String METRICS_NAME = "TableRequests";

  /**
   * The name of the metrics context that metrics will be under.
   */
  private final static String METRICS_CONTEXT = "regionserver";

  /**
   * Description
   */
  private final static String METRICS_DESCRIPTION =
    "Metrics about Tables on a single HBase RegionServer";

  /**
   * The name of the metrics context that metrics will be under in jmx
   */
  private final static String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;

  private final static String GET_TIME = "getTime";
  private final static String SCAN_TIME = "scanTime";
  private final static String SCAN_SIZE = "scanSize";
  private final static String PUT_TIME = "putTime";
  private final static String PUT_BATCH_TIME = "putBatchTime";
  private final static String DELETE_TIME = "deleteTime";
  private final static String DELETE_BATCH_TIME = "deleteBatchTime";
  private final static String INCREMENT_TIME = "incrementTime";
  private final static String APPEND_TIME = "appendTime";
  private final static String CHECK_AND_DELETE_TIME = "checkAndDeleteTime";
  private final static String CHECK_AND_PUT_TIME = "checkAndPutTime";
  private final static String CHECK_AND_MUTATE_TIME = "checkAndMutateTime";
  private final static String TABLE_READ_QUERY_PER_SECOND = "tableReadQueryPerSecond";
  private final static String TABLE_WRITE_QUERY_PER_SECOND = "tableWriteQueryPerSecond";

  private Histogram getTimeHistogram;
  private Histogram scanTimeHistogram;
  private Histogram scanSizeHistogram;
  private Histogram putTimeHistogram;
  private Histogram putBatchTimeHistogram;
  private Histogram deleteTimeHistogram;
  private Histogram deleteBatchTimeHistogram;
  private Histogram incrementTimeHistogram;
  private Histogram appendTimeHistogram;
  private Histogram checkAndDeleteTimeHistogram;
  private Histogram checkAndPutTimeHistogram;
  private Histogram checkAndMutateTimeHistogram;

  private Meter readMeter;
  private Meter writeMeter;

  private MetricRegistry registry;
  private TableName tableName;
  private Configuration conf;
  private MetricRegistryInfo registryInfo;

  private boolean enableTableLatenciesMetrics;
  private boolean enabTableQueryMeterMetrics;

  public boolean isEnableTableLatenciesMetrics() {
    return enableTableLatenciesMetrics;
  }

  public boolean isEnabTableQueryMeterMetrics() {
    return enabTableQueryMeterMetrics;
  }

  public MetricsTableRequests(TableName tableName, Configuration conf) {
    init(tableName, conf);
  }

  private void init(TableName tableName, Configuration conf) {
    this.tableName = tableName;
    this.conf = conf;
    enableTableLatenciesMetrics = this.conf.getBoolean(ENABLE_TABLE_LATENCIES_METRICS_KEY,
      ENABLE_TABLE_LATENCIES_METRICS_DEFAULT);
    enabTableQueryMeterMetrics = this.conf.getBoolean(ENABLE_TABLE_QUERY_METER_METRICS_KEY,
      ENABLE_TABLE_QUERY_METER_METRICS_KEY_DEFAULT);
    if (enableTableLatenciesMetrics || enabTableQueryMeterMetrics) {
      registry = createRegistryForTableRequests();
      if (enableTableLatenciesMetrics) {
        getTimeHistogram = registry.histogram(GET_TIME);
        scanTimeHistogram = registry.histogram(SCAN_TIME);
        scanSizeHistogram = registry.histogram(SCAN_SIZE);
        putTimeHistogram = registry.histogram(PUT_TIME);
        putBatchTimeHistogram = registry.histogram(PUT_BATCH_TIME);
        deleteTimeHistogram = registry.histogram(DELETE_TIME);
        deleteBatchTimeHistogram = registry.histogram(DELETE_BATCH_TIME);
        incrementTimeHistogram = registry.histogram(INCREMENT_TIME);
        appendTimeHistogram = registry.histogram(APPEND_TIME);
        checkAndDeleteTimeHistogram = registry.histogram(CHECK_AND_DELETE_TIME);
        checkAndPutTimeHistogram = registry.histogram(CHECK_AND_PUT_TIME);
        checkAndMutateTimeHistogram = registry.histogram(CHECK_AND_MUTATE_TIME);
      }

      if (enabTableQueryMeterMetrics) {
        readMeter = registry.meter(TABLE_READ_QUERY_PER_SECOND);
        writeMeter = registry.meter(TABLE_WRITE_QUERY_PER_SECOND);
      }
    }
  }

  private MetricRegistry createRegistryForTableRequests() {
    return MetricRegistries.global().create(createRegistryInfoForTableRequests());
  }

  private MetricRegistryInfo createRegistryInfoForTableRequests() {
    registryInfo = new MetricRegistryInfo(qualifyMetrics(METRICS_NAME, tableName),
      METRICS_DESCRIPTION, qualifyMetrics(METRICS_JMX_CONTEXT, tableName), METRICS_CONTEXT, false);
    return registryInfo;
  }

  public void removeRegistry() {
    if (enableTableLatenciesMetrics || enabTableQueryMeterMetrics) {
      MetricRegistries.global().remove(registry.getMetricRegistryInfo());
    }
  }

  private static String qualifyMetrics(String prefix, TableName tableName) {
    StringBuilder sb = new StringBuilder();
    sb.append(prefix).append("_");
    sb.append("Namespace_").append(tableName.getNamespaceAsString());
    sb.append("_table_").append(tableName.getQualifierAsString());
    return sb.toString();
  }

  /**
   * Update the Put time histogram
   * @param t time it took
   */
  public void updatePut(long t) {
    if (isEnableTableLatenciesMetrics()) {
      putTimeHistogram.update(t);
    }
  }

  /**
   * Update the batch Put time histogram
   * @param t time it took
   */
  public void updatePutBatch(long t) {
    if (isEnableTableLatenciesMetrics()) {
      putBatchTimeHistogram.update(t);
    }
  }

  /**
   * Update the Delete time histogram
   * @param t time it took
   */
  public void updateDelete(long t) {
    if (isEnableTableLatenciesMetrics()) {
      deleteTimeHistogram.update(t);
    }
  }

  /**
   * Update the batch Delete time histogram
   * @param t time it took
   */
  public void updateDeleteBatch(long t) {
    if (isEnableTableLatenciesMetrics()) {
      deleteBatchTimeHistogram.update(t);
    }
  }

  /**
   * Update the Get time histogram.
   * @param t time it took
   */
  public void updateGet(long t) {
    if (isEnableTableLatenciesMetrics()) {
      getTimeHistogram.update(t);
    }
  }

  /**
   * Update the Increment time histogram.
   * @param t time it took
   */
  public void updateIncrement(long t) {
    if (isEnableTableLatenciesMetrics()) {
      incrementTimeHistogram.update(t);
    }
  }

  /**
   * Update the Append time histogram.
   * @param t time it took
   */
  public void updateAppend(long t) {
    if (isEnableTableLatenciesMetrics()) {
      appendTimeHistogram.update(t);
    }
  }

  /**
   * Update the scan size.
   * @param scanSize size of the scan
   */
  public void updateScanSize(long scanSize) {
    if (isEnableTableLatenciesMetrics()) {
      scanSizeHistogram.update(scanSize);
    }
  }

  /**
   * Update the scan time.
   * @param t time it took
   */
  public void updateScanTime(long t) {
    if (isEnableTableLatenciesMetrics()) {
      scanTimeHistogram.update(t);
    }
  }

  /**
   * Update the CheckAndDelete time histogram.
   * @param time time it took
   */
  public void updateCheckAndDelete(long time) {
    if (isEnableTableLatenciesMetrics()) {
      checkAndDeleteTimeHistogram.update(time);
    }
  }

  /**
   * Update the CheckAndPut time histogram.
   * @param time time it took
   */
  public void updateCheckAndPut(long time) {
    if (isEnableTableLatenciesMetrics()) {
      checkAndPutTimeHistogram.update(time);
    }
  }

  /**
   * Update the CheckAndMutate time histogram.
   * @param time time it took
   */
  public void updateCheckAndMutate(long time) {
    if (isEnableTableLatenciesMetrics()) {
      checkAndMutateTimeHistogram.update(time);
    }
  }

  /**
   * Update table read QPS
   * @param count Number of occurrences to record
   */
  public void updateTableReadQueryMeter(long count) {
    if (isEnabTableQueryMeterMetrics()) {
      readMeter.mark(count);
    }
  }

  /**
   * Update table read QPS
   */
  public void updateTableReadQueryMeter() {
    if (isEnabTableQueryMeterMetrics()) {
      readMeter.mark();
    }
  }

  /**
   * Update table write QPS
   * @param count Number of occurrences to record
   */
  public void updateTableWriteQueryMeter(long count) {
    if (isEnabTableQueryMeterMetrics()) {
      writeMeter.mark(count);
    }
  }

  /**
   * Update table write QPS
   */
  public void updateTableWriteQueryMeter() {
    if (isEnabTableQueryMeterMetrics()) {
      writeMeter.mark();
    }
  }

  // Visible for testing
  public MetricRegistryInfo getMetricRegistryInfo() {
    return registryInfo;
  }
}
@@ -20,11 +20,13 @@ package org.apache.hadoop.hbase.regionserver;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompatibilityFactory;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.metrics.MetricsTableRequests;
import org.apache.hadoop.hbase.test.MetricsAssertHelper;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -146,35 +148,40 @@ public class TestMetricsRegionServer {

  @Test
  public void testSlowCount() {
    HRegion region = mock(HRegion.class);
    MetricsTableRequests metricsTableRequests = mock(MetricsTableRequests.class);
    when(region.getMetricsTableRequests()).thenReturn(metricsTableRequests);
    when(metricsTableRequests.isEnableTableLatenciesMetrics()).thenReturn(false);
    when(metricsTableRequests.isEnabTableQueryMeterMetrics()).thenReturn(false);
    for (int i = 0; i < 12; i++) {
      rsm.updateAppend(null, 12);
      rsm.updateAppend(null, 1002);
      rsm.updateAppend(region, 12);
      rsm.updateAppend(region, 1002);
    }
    for (int i = 0; i < 13; i++) {
      rsm.updateDeleteBatch(null, 13);
      rsm.updateDeleteBatch(null, 1003);
      rsm.updateDeleteBatch(region, 13);
      rsm.updateDeleteBatch(region, 1003);
    }
    for (int i = 0; i < 14; i++) {
      rsm.updateGet(null, 14);
      rsm.updateGet(null, 1004);
      rsm.updateGet(region, 14);
      rsm.updateGet(region, 1004);
    }
    for (int i = 0; i < 15; i++) {
      rsm.updateIncrement(null, 15);
      rsm.updateIncrement(null, 1005);
      rsm.updateIncrement(region, 15);
      rsm.updateIncrement(region, 1005);
    }
    for (int i = 0; i < 16; i++) {
      rsm.updatePutBatch(null, 16);
      rsm.updatePutBatch(null, 1006);
      rsm.updatePutBatch(region, 16);
      rsm.updatePutBatch(region, 1006);
    }

    for (int i = 0; i < 17; i++) {
      rsm.updatePut(null, 17);
      rsm.updateDelete(null, 17);
      rsm.updatePut(null, 1006);
      rsm.updateDelete(null, 1003);
      rsm.updateCheckAndDelete(null, 17);
      rsm.updateCheckAndPut(null, 17);
      rsm.updateCheckAndMutate(null, 17);
      rsm.updatePut(region, 17);
      rsm.updateDelete(region, 17);
      rsm.updatePut(region, 1006);
      rsm.updateDelete(region, 1003);
      rsm.updateCheckAndDelete(region, 17);
      rsm.updateCheckAndPut(region, 17);
      rsm.updateCheckAndMutate(region, 17);
    }

    HELPER.assertCounter("appendNumOps", 24, serverSource);
@@ -276,23 +283,27 @@ public class TestMetricsRegionServer {

  @Test
  public void testTableQueryMeterSwitch() {
    TableName tn1 = TableName.valueOf("table1");
    HRegion region = mock(HRegion.class);
    MetricsTableRequests metricsTableRequests = mock(MetricsTableRequests.class);
    when(region.getMetricsTableRequests()).thenReturn(metricsTableRequests);
    when(metricsTableRequests.isEnableTableLatenciesMetrics()).thenReturn(false);
    when(metricsTableRequests.isEnabTableQueryMeterMetrics()).thenReturn(false);
    Configuration conf = new Configuration(false);
    // disable
    rsm.updateReadQueryMeter(tn1, 500L);
    rsm.updateReadQueryMeter(region, 500L);
    assertFalse(HELPER.checkGaugeExists("ServerReadQueryPerSecond_count", serverSource));
    rsm.updateWriteQueryMeter(tn1, 500L);
    rsm.updateWriteQueryMeter(region, 500L);
    assertFalse(HELPER.checkGaugeExists("ServerWriteQueryPerSecond_count", serverSource));

    // enable
    conf.setBoolean(MetricsRegionServer.RS_ENABLE_SERVER_QUERY_METER_METRICS_KEY, true);
    rsm = new MetricsRegionServer(wrapper, conf, null);
    serverSource = rsm.getMetricsSource();
    rsm.updateReadQueryMeter(tn1, 500L);
    rsm.updateReadQueryMeter(region, 500L);
    assertTrue(HELPER.checkGaugeExists("ServerWriteQueryPerSecond_count", serverSource));
    HELPER.assertGauge("ServerReadQueryPerSecond_count", 500L, serverSource);
    assertTrue(HELPER.checkGaugeExists("ServerWriteQueryPerSecond_count", serverSource));
    rsm.updateWriteQueryMeter(tn1, 500L);
    rsm.updateWriteQueryMeter(region, 500L);
    HELPER.assertGauge("ServerWriteQueryPerSecond_count", 500L, serverSource);
  }
}
@@ -1,122 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompatibilityFactory;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.test.MetricsAssertHelper;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ RegionServerTests.class, SmallTests.class })
public class TestMetricsTableLatencies {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestMetricsTableLatencies.class);

  public static MetricsAssertHelper HELPER =
    CompatibilityFactory.getInstance(MetricsAssertHelper.class);

  @Test
  public void testTableWrapperAggregateMetrics() throws IOException {
    TableName tn1 = TableName.valueOf("table1");
    TableName tn2 = TableName.valueOf("table2");
    MetricsTableLatencies latencies =
      CompatibilitySingletonFactory.getInstance(MetricsTableLatencies.class);
    assertTrue("'latencies' is actually " + latencies.getClass(),
      latencies instanceof MetricsTableLatenciesImpl);
    MetricsTableLatenciesImpl latenciesImpl = (MetricsTableLatenciesImpl) latencies;
    RegionServerTableMetrics tableMetrics = new RegionServerTableMetrics(false);

    // Metrics to each table should be disjoint
    // N.B. each call to assertGauge removes all previously acquired metrics so we have to
    // make the metrics call and then immediately verify it. Trying to do multiple metrics
    // updates followed by multiple verifications will fail on the 2nd verification (as the
    // first verification cleaned the data structures in MetricsAssertHelperImpl).
    tableMetrics.updateGet(tn1, 500L);
    HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName(tn1,
      MetricsTableLatencies.GET_TIME + "_" + "999th_percentile"), 500L, latenciesImpl);
    tableMetrics.updatePut(tn1, 50L);
    HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName(tn1,
      MetricsTableLatencies.PUT_TIME + "_" + "99th_percentile"), 50L, latenciesImpl);

    tableMetrics.updateGet(tn2, 300L);
    HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName(tn2,
      MetricsTableLatencies.GET_TIME + "_" + "999th_percentile"), 300L, latenciesImpl);
    tableMetrics.updatePut(tn2, 75L);
    HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName(tn2,
      MetricsTableLatencies.PUT_TIME + "_" + "99th_percentile"), 75L, latenciesImpl);
  }

  @Test
  public void testTableQueryMeterSwitch() {
    TableName tn1 = TableName.valueOf("table1");
    MetricsTableLatencies latencies =
      CompatibilitySingletonFactory.getInstance(MetricsTableLatencies.class);
    assertTrue("'latencies' is actually " + latencies.getClass(),
      latencies instanceof MetricsTableLatenciesImpl);
    MetricsTableLatenciesImpl latenciesImpl = (MetricsTableLatenciesImpl) latencies;

    Configuration conf = new Configuration();
    boolean enableTableQueryMeter =
      conf.getBoolean(MetricsRegionServer.RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY,
        MetricsRegionServer.RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY_DEFAULT);
    // disable
    assertFalse(enableTableQueryMeter);
    RegionServerTableMetrics tableMetrics = new RegionServerTableMetrics(enableTableQueryMeter);
    tableMetrics.updateTableReadQueryMeter(tn1, 500L);
    assertFalse(HELPER.checkGaugeExists(MetricsTableLatenciesImpl.qualifyMetricsName(tn1,
      MetricsTableQueryMeterImpl.TABLE_READ_QUERY_PER_SECOND + "_" + "count"), latenciesImpl));
    tableMetrics.updateTableWriteQueryMeter(tn1, 500L);
    assertFalse(HELPER.checkGaugeExists(MetricsTableLatenciesImpl.qualifyMetricsName(tn1,
      MetricsTableQueryMeterImpl.TABLE_WRITE_QUERY_PER_SECOND + "_" + "count"), latenciesImpl));

    // enable
    conf.setBoolean(MetricsRegionServer.RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY, true);
    enableTableQueryMeter =
      conf.getBoolean(MetricsRegionServer.RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY,
        MetricsRegionServer.RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY_DEFAULT);
    assertTrue(enableTableQueryMeter);
    tableMetrics = new RegionServerTableMetrics(true);
    tableMetrics.updateTableReadQueryMeter(tn1, 500L);
    assertTrue(HELPER.checkGaugeExists(MetricsTableLatenciesImpl.qualifyMetricsName(tn1,
      MetricsTableQueryMeterImpl.TABLE_READ_QUERY_PER_SECOND + "_" + "count"), latenciesImpl));
    HELPER.assertGauge(
      MetricsTableLatenciesImpl.qualifyMetricsName(tn1,
        MetricsTableQueryMeterImpl.TABLE_READ_QUERY_PER_SECOND + "_" + "count"),
      500L, latenciesImpl);
    tableMetrics.updateTableWriteQueryMeter(tn1, 500L);
    assertTrue(HELPER.checkGaugeExists(MetricsTableLatenciesImpl.qualifyMetricsName(tn1,
      MetricsTableQueryMeterImpl.TABLE_WRITE_QUERY_PER_SECOND + "_" + "count"), latenciesImpl));
    HELPER.assertGauge(
      MetricsTableLatenciesImpl.qualifyMetricsName(tn1,
        MetricsTableQueryMeterImpl.TABLE_WRITE_QUERY_PER_SECOND + "_" + "count"),
      500L, latenciesImpl);
  }
}
@ -0,0 +1,126 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.util.Optional;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.metrics.Metric;
import org.apache.hadoop.hbase.metrics.MetricRegistries;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.metrics.MetricRegistryInfo;
import org.apache.hadoop.hbase.metrics.Snapshot;
import org.apache.hadoop.hbase.metrics.impl.DropwizardMeter;
import org.apache.hadoop.hbase.metrics.impl.HistogramImpl;
import org.apache.hadoop.hbase.regionserver.metrics.MetricsTableRequests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ RegionServerTests.class, SmallTests.class })
public class TestMetricsTableRequests {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestMetricsTableRequests.class);

  @Test
  public void testMetricsTableLatencies() {
    TableName tn1 = TableName.valueOf("table1");
    TableName tn2 = TableName.valueOf("table2");
    MetricsTableRequests requests1 = new MetricsTableRequests(tn1, new Configuration());
    MetricsTableRequests requests2 = new MetricsTableRequests(tn2, new Configuration());
    assertTrue("'requests' is actually " + requests1.getClass(),
      requests1 instanceof MetricsTableRequests);
    assertTrue("'requests' is actually " + requests2.getClass(),
      requests2 instanceof MetricsTableRequests);

    MetricRegistryInfo info1 = requests1.getMetricRegistryInfo();
    MetricRegistryInfo info2 = requests2.getMetricRegistryInfo();
    Optional<MetricRegistry> registry1 = MetricRegistries.global().get(info1);
    assertTrue(registry1.isPresent());
    Optional<MetricRegistry> registry2 = MetricRegistries.global().get(info2);
    assertTrue(registry2.isPresent());

    requests1.updateGet(500L);
    Snapshot latencies1SnapshotGet =
      ((HistogramImpl) registry1.get().get("getTime").get()).snapshot();
    assertEquals(500, latencies1SnapshotGet.get999thPercentile());

    requests1.updatePut(50L);
    Snapshot latencies1SnapshotPut =
      ((HistogramImpl) registry1.get().get("putTime").get()).snapshot();
    assertEquals(50, latencies1SnapshotPut.get99thPercentile());

    requests2.updateGet(300L);
    Snapshot latencies2SnapshotGet =
      ((HistogramImpl) registry2.get().get("getTime").get()).snapshot();
    assertEquals(300, latencies2SnapshotGet.get999thPercentile());

    requests2.updatePut(75L);
    Snapshot latencies2SnapshotPut =
      ((HistogramImpl) registry2.get().get("putTime").get()).snapshot();
    assertEquals(75, latencies2SnapshotPut.get99thPercentile());
  }
  @Test
  public void testTableQueryMeterSwitch() {
    TableName tn1 = TableName.valueOf("table1");
    Configuration conf = new Configuration();
    boolean enableTableQueryMeter =
      conf.getBoolean(MetricsTableRequests.ENABLE_TABLE_QUERY_METER_METRICS_KEY,
        MetricsTableRequests.ENABLE_TABLE_QUERY_METER_METRICS_KEY_DEFAULT);
    // disable
    assertFalse(enableTableQueryMeter);
    MetricsTableRequests requests = new MetricsTableRequests(tn1, conf);
    assertTrue("'requests' is actually " + requests.getClass(),
      requests instanceof MetricsTableRequests);

    MetricRegistryInfo info = requests.getMetricRegistryInfo();
    Optional<MetricRegistry> registry = MetricRegistries.global().get(info);
    assertTrue(registry.isPresent());
    requests.updateTableReadQueryMeter(500L);
    Optional<Metric> read = registry.get().get("tableReadQueryPerSecond");
    assertFalse(read.isPresent());

    // enable
    conf.setBoolean(MetricsTableRequests.ENABLE_TABLE_QUERY_METER_METRICS_KEY, true);
    enableTableQueryMeter =
      conf.getBoolean(MetricsTableRequests.ENABLE_TABLE_QUERY_METER_METRICS_KEY,
        MetricsTableRequests.ENABLE_TABLE_QUERY_METER_METRICS_KEY_DEFAULT);
    assertTrue(enableTableQueryMeter);
    requests = new MetricsTableRequests(tn1, conf);
    assertTrue("'requests' is actually " + requests.getClass(),
      requests instanceof MetricsTableRequests);

    info = requests.getMetricRegistryInfo();
    registry = MetricRegistries.global().get(info);
    assertTrue(registry.isPresent());
    requests.updateTableReadQueryMeter(500L);
    read = registry.get().get("tableReadQueryPerSecond");
    assertTrue(read.isPresent());
    assertEquals(((DropwizardMeter) read.get()).getCount(), 500);
  }
}
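As a reading aid, here is a minimal sketch (not part of the commit) of how a caller could record a per-table get latency and read it back through the global registry; it uses only the MetricsTableRequests constructor and methods exercised in the test above, and the table name "t1" and the 120L sample are purely illustrative.

    // Hedged sketch built only from the API used in TestMetricsTableRequests above.
    Configuration conf = new Configuration();
    MetricsTableRequests requests = new MetricsTableRequests(TableName.valueOf("t1"), conf);
    requests.updateGet(120L); // record one get latency sample
    MetricRegistryInfo info = requests.getMetricRegistryInfo();
    Optional<MetricRegistry> registry = MetricRegistries.global().get(info);
    if (registry.isPresent()) {
      // the get histogram is registered under "getTime", as asserted in the test above
      Snapshot snapshot = ((HistogramImpl) registry.get().get("getTime").get()).snapshot();
      System.out.println("getTime p999 = " + snapshot.get999thPercentile());
    }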
@ -19,6 +19,8 @@ package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.security.PrivilegedAction;
import org.apache.hadoop.conf.Configuration;
@ -26,6 +28,7 @@ import org.apache.hadoop.hbase.CompatibilityFactory;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.metrics.MetricsTableRequests;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.test.MetricsAssertHelper;
import org.apache.hadoop.hbase.testclassification.LargeTests;
@ -65,23 +68,28 @@ public class TestMetricsUserAggregate {
  }

  private void doOperations() {
    HRegion region = mock(HRegion.class);
    MetricsTableRequests metricsTableRequests = mock(MetricsTableRequests.class);
    when(region.getMetricsTableRequests()).thenReturn(metricsTableRequests);
    when(metricsTableRequests.isEnableTableLatenciesMetrics()).thenReturn(false);
    when(metricsTableRequests.isEnabTableQueryMeterMetrics()).thenReturn(false);
    for (int i = 0; i < 10; i++) {
      rsm.updateGet(tableName, 10);
      rsm.updateGet(region, 10);
    }
    for (int i = 0; i < 11; i++) {
      rsm.updateScanTime(tableName, 11);
      rsm.updateScanTime(region, 11);
    }
    for (int i = 0; i < 12; i++) {
      rsm.updatePut(tableName, 12);
      rsm.updatePut(region, 12);
    }
    for (int i = 0; i < 13; i++) {
      rsm.updateDelete(tableName, 13);
      rsm.updateDelete(region, 13);
    }
    for (int i = 0; i < 14; i++) {
      rsm.updateIncrement(tableName, 14);
      rsm.updateIncrement(region, 14);
    }
    for (int i = 0; i < 15; i++) {
      rsm.updateAppend(tableName, 15);
      rsm.updateAppend(region, 15);
    }
    for (int i = 0; i < 16; i++) {
      rsm.updateReplay(16);
@ -150,7 +158,12 @@ public class TestMetricsUserAggregate {
      .doAs(new PrivilegedAction<Void>() {
        @Override
        public Void run() {
          rsm.updateGet(tableName, 10);
          HRegion region = mock(HRegion.class);
          MetricsTableRequests metricsTableRequests = mock(MetricsTableRequests.class);
          when(region.getMetricsTableRequests()).thenReturn(metricsTableRequests);
          when(metricsTableRequests.isEnableTableLatenciesMetrics()).thenReturn(false);
          when(metricsTableRequests.isEnabTableQueryMeterMetrics()).thenReturn(false);
          rsm.updateGet(region, 10);
          return null;
        }
      });
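The mocks above return false for both per-table switches, so the user-aggregate path is exercised without touching table-level metrics. A hedged sketch of the kind of guard a caller could place around those flags follows; the helper name is hypothetical and the real MetricsRegionServer wiring may differ.

    // Hedged sketch: recordGetLatency is a hypothetical helper, not part of this change.
    private void recordGetLatency(HRegion region, long latency) {
      MetricsTableRequests tableRequests = region.getMetricsTableRequests();
      if (tableRequests != null && tableRequests.isEnableTableLatenciesMetrics()) {
        tableRequests.updateGet(latency); // skip the update when per-table latencies are disabled
      }
    }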