HBASE-22975 Add read and write QPS metrics at server level and table level (#615)

Signed-off-by: Reid Chan <reidchan@apache.org>
zbq.dean 2019-09-23 12:51:25 +08:00 committed by Reid Chan
parent 537c5f0660
commit a8e3d23cca
6 changed files with 237 additions and 0 deletions


@@ -0,0 +1,53 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.hbase.TableName;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Queries per second for each table in a RegionServer.
 */
@InterfaceAudience.Private
public interface MetricsTableQueryMeter {

  /**
   * Update table read QPS
   * @param tableName The table the metric is for
   * @param count Number of occurrences to record
   */
  void updateTableReadQueryMeter(TableName tableName, long count);

  /**
   * Update table read QPS
   * @param tableName The table the metric is for
   */
  void updateTableReadQueryMeter(TableName tableName);

  /**
   * Update table write QPS
   * @param tableName The table the metric is for
   * @param count Number of occurrences to record
   */
  void updateTableWriteQueryMeter(TableName tableName, long count);

  /**
   * Update table write QPS
   * @param tableName The table the metric is for
   */
  void updateTableWriteQueryMeter(TableName tableName);
}
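
A minimal sketch of how a caller might drive this interface; the wrapper class and its methods below are hypothetical, for illustration only:

import org.apache.hadoop.hbase.TableName;

// Hypothetical caller: marks a meter once per served query.
public class QueryMeterCaller {
  private final MetricsTableQueryMeter queryMeter;

  public QueryMeterCaller(MetricsTableQueryMeter queryMeter) {
    this.queryMeter = queryMeter;
  }

  // A single Get: mark the read meter once.
  public void onGet(TableName table) {
    queryMeter.updateTableReadQueryMeter(table);
  }

  // A batch of puts: mark the write meter once per mutation.
  public void onBatchPut(TableName table, int mutationCount) {
    queryMeter.updateTableWriteQueryMeter(table, mutationCount);
  }
}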


@@ -20,6 +20,8 @@ import java.util.HashMap;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
import org.apache.yetus.audience.InterfaceAudience;
@@ -171,4 +173,15 @@ public class MetricsTableLatenciesImpl extends BaseSourceImpl implements Metrics
  public void updateScanTime(String tableName, long t) {
    getOrCreateTableHistogram(tableName).updateScanTime(t);
  }

  @Override
  public void getMetrics(MetricsCollector metricsCollector, boolean all) {
    MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName);
    // The source is registered in super's constructor, so this may be called
    // before initialization of this class has completed; hence the null check.
    metricsRegistry.snapshot(mrb, all);
    if (metricsAdapter != null) {
      // snapshot the hbase-metrics MetricRegistry as well
      metricsAdapter.snapshotAllMetrics(registry, mrb);
    }
  }
}
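
The comment in getMetrics() guards a real ordering hazard: the superclass constructor registers the source with the metrics system, so a collector can invoke getMetrics() before this subclass has finished constructing and assigned metricsAdapter. A minimal analogue of the hazard (class names hypothetical):

abstract class Base {
  Base() {
    register(this); // "this" escapes to the metrics system before subclass fields are set
  }
  static void register(Base source) { /* e.g. a metrics-system registration */ }
}

class Sub extends Base {
  // Assigned only after Base() returns; a collector running during Base()
  // would still observe null here.
  final Object adapter = new Object();
}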


@@ -0,0 +1,102 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.metrics.Meter;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Implementation of {@link MetricsTableQueryMeter} to track queries per second for each table in
 * a RegionServer.
 */
@InterfaceAudience.Private
public class MetricsTableQueryMeterImpl implements MetricsTableQueryMeter {

  private final Map<TableName, TableMeters> metersByTable = new ConcurrentHashMap<>();
  private final MetricRegistry metricRegistry;

  private static final String TABLE_READ_QUERY_PER_SECOND = "tableReadQueryPerSecond";
  private static final String TABLE_WRITE_QUERY_PER_SECOND = "tableWriteQueryPerSecond";

  public MetricsTableQueryMeterImpl(MetricRegistry metricRegistry) {
    this.metricRegistry = metricRegistry;
  }

  private static class TableMeters {
    final Meter tableReadQueryMeter;
    final Meter tableWriteQueryMeter;

    TableMeters(MetricRegistry metricRegistry, TableName tableName) {
      this.tableReadQueryMeter =
          metricRegistry.meter(qualifyMetricsName(tableName, TABLE_READ_QUERY_PER_SECOND));
      this.tableWriteQueryMeter =
          metricRegistry.meter(qualifyMetricsName(tableName, TABLE_WRITE_QUERY_PER_SECOND));
    }

    public void updateTableReadQueryMeter(long count) {
      tableReadQueryMeter.mark(count);
    }

    public void updateTableReadQueryMeter() {
      tableReadQueryMeter.mark();
    }

    public void updateTableWriteQueryMeter(long count) {
      tableWriteQueryMeter.mark(count);
    }

    public void updateTableWriteQueryMeter() {
      tableWriteQueryMeter.mark();
    }
  }

  private static String qualifyMetricsName(TableName tableName, String metric) {
    StringBuilder sb = new StringBuilder();
    sb.append("Namespace_").append(tableName.getNamespaceAsString());
    sb.append("_table_").append(tableName.getQualifierAsString());
    sb.append("_metric_").append(metric);
    return sb.toString();
  }

  private TableMeters getOrCreateTableMeter(TableName tableName) {
    return metersByTable.computeIfAbsent(tableName, tbn -> new TableMeters(metricRegistry, tbn));
  }

  @Override
  public void updateTableReadQueryMeter(TableName tableName, long count) {
    getOrCreateTableMeter(tableName).updateTableReadQueryMeter(count);
  }

  @Override
  public void updateTableReadQueryMeter(TableName tableName) {
    getOrCreateTableMeter(tableName).updateTableReadQueryMeter();
  }

  @Override
  public void updateTableWriteQueryMeter(TableName tableName, long count) {
    getOrCreateTableMeter(tableName).updateTableWriteQueryMeter(count);
  }

  @Override
  public void updateTableWriteQueryMeter(TableName tableName) {
    getOrCreateTableMeter(tableName).updateTableWriteQueryMeter();
  }
}
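
Given qualifyMetricsName above, a table's meters surface under names of the form Namespace_<ns>_table_<table>_metric_<meter>. A small sketch (the table name is illustrative):

import org.apache.hadoop.hbase.TableName;

public class MetricNameDemo {
  public static void main(String[] args) {
    TableName tn = TableName.valueOf("default", "usertable");
    // Mirrors MetricsTableQueryMeterImpl#qualifyMetricsName
    String name = "Namespace_" + tn.getNamespaceAsString()
        + "_table_" + tn.getQualifierAsString()
        + "_metric_tableReadQueryPerSecond";
    // Prints: Namespace_default_table_usertable_metric_tableReadQueryPerSecond
    System.out.println(name);
  }
}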


@@ -4083,6 +4083,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
      requestFlushIfNeeded();
    }
  } finally {
    if (rsServices != null && rsServices.getMetrics() != null) {
      rsServices.getMetrics().updateWriteQueryMeter(
          this.htableDescriptor.getTableName(), batchOp.size());
    }
    batchOp.closeRegionOperation();
  }
  return batchOp.retCodeDetails;
@@ -6613,6 +6617,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
  if (!outResults.isEmpty()) {
    readRequestsCount.increment();
  }
  if (rsServices != null && rsServices.getMetrics() != null) {
    rsServices.getMetrics().updateReadQueryMeter(getRegionInfo().getTable());
  }
  // If the size limit was reached it means a partial Result is being returned. Returning a
  // partial Result means that we should not reset the filters; filters should only be reset in
@@ -7742,6 +7749,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    // STEP 11. Release row lock(s)
    releaseRowLocks(acquiredRowLocks);
    if (rsServices != null && rsServices.getMetrics() != null) {
      rsServices.getMetrics().updateWriteQueryMeter(
          this.htableDescriptor.getTableName(), mutations.size());
    }
  }
  success = true;
} finally {
@@ -7897,6 +7909,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
      rsServices.getNonceManager().addMvccToOperationContext(nonceGroup, nonce,
        writeEntry.getWriteNumber());
    }
    if (rsServices != null && rsServices.getMetrics() != null) {
      rsServices.getMetrics().updateWriteQueryMeter(this.htableDescriptor.getTableName());
    }
    writeEntry = null;
  } finally {
    this.updatesLock.readLock().unlock();
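
All four HRegion call sites above share one defensive shape, since rsServices (and its metrics) can be absent, e.g. for regions opened outside a live RegionServer in tests. The write paths mark once per mutation in the batch, while the scan path marks once per call. The recurring pattern, as it appears in the hunks:

// Guard: rsServices and its metrics may be null, so skip metering rather than NPE.
if (rsServices != null && rsServices.getMetrics() != null) {
  rsServices.getMetrics().updateWriteQueryMeter(
      this.htableDescriptor.getTableName(), batchOp.size());
}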


@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.metrics.Meter;
import org.apache.hadoop.hbase.metrics.MetricRegistries;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.metrics.Timer;
@@ -51,6 +52,8 @@ public class MetricsRegionServer {
  private MetricRegistry metricRegistry;
  private Timer bulkLoadTimer;
  private Meter serverReadQueryMeter;
  private Meter serverWriteQueryMeter;

  public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, Configuration conf,
      MetricsTable metricsTable) {
@@ -68,6 +71,8 @@
bulkLoadTimer = metricRegistry.timer("Bulkload");
quotaSource = CompatibilitySingletonFactory.getInstance(MetricsRegionServerQuotaSource.class);
serverReadQueryMeter = metricRegistry.meter("ServerReadQueryPerSecond");
serverWriteQueryMeter = metricRegistry.meter("ServerWriteQueryPerSecond");
}
MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper,
@@ -248,4 +253,32 @@ public class MetricsRegionServer {
  public void incrementRegionSizeReportingChoreTime(long time) {
    quotaSource.incrementRegionSizeReportingChoreTime(time);
  }

  public void updateReadQueryMeter(TableName tn, long count) {
    if (tableMetrics != null && tn != null) {
      tableMetrics.updateTableReadQueryMeter(tn, count);
    }
    this.serverReadQueryMeter.mark(count);
  }

  public void updateReadQueryMeter(TableName tn) {
    if (tableMetrics != null && tn != null) {
      tableMetrics.updateTableReadQueryMeter(tn);
    }
    this.serverReadQueryMeter.mark();
  }

  public void updateWriteQueryMeter(TableName tn, long count) {
    if (tableMetrics != null && tn != null) {
      tableMetrics.updateTableWriteQueryMeter(tn, count);
    }
    this.serverWriteQueryMeter.mark(count);
  }

  public void updateWriteQueryMeter(TableName tn) {
    if (tableMetrics != null && tn != null) {
      tableMetrics.updateTableWriteQueryMeter(tn);
    }
    this.serverWriteQueryMeter.mark();
  }
}
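
Each update method fans out to two levels: the per-table meter, marked only when tableMetrics is non-null (table-level metrics enabled), and the server-wide meter, which is always marked. A usage sketch; the MetricsRegionServer instance and the table name are illustrative:

TableName table = TableName.valueOf("usertable");
// metrics is the MetricsRegionServer owned by the RegionServer
metrics.updateReadQueryMeter(table);       // table meter (if enabled) + ServerReadQueryPerSecond
metrics.updateWriteQueryMeter(table, 100); // both write meters advance by 100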


@@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.metrics.MetricRegistries;
import org.apache.yetus.audience.InterfaceAudience;
/**
@@ -28,9 +29,12 @@ import org.apache.yetus.audience.InterfaceAudience;
public class RegionServerTableMetrics {

  private final MetricsTableLatencies latencies;
  private final MetricsTableQueryMeter queryMeter;

  public RegionServerTableMetrics() {
    latencies = CompatibilitySingletonFactory.getInstance(MetricsTableLatencies.class);
    queryMeter = new MetricsTableQueryMeterImpl(MetricRegistries.global()
        .get(((MetricsTableLatenciesImpl) latencies).getMetricRegistryInfo()).get());
  }

  public void updatePut(TableName table, long time) {
@@ -68,4 +72,20 @@ public class RegionServerTableMetrics {
  public void updateScanSize(TableName table, long size) {
    latencies.updateScanSize(table.getNameAsString(), size);
  }

  public void updateTableReadQueryMeter(TableName table, long count) {
    queryMeter.updateTableReadQueryMeter(table, count);
  }

  public void updateTableReadQueryMeter(TableName table) {
    queryMeter.updateTableReadQueryMeter(table);
  }

  public void updateTableWriteQueryMeter(TableName table, long count) {
    queryMeter.updateTableWriteQueryMeter(table, count);
  }

  public void updateTableWriteQueryMeter(TableName table) {
    queryMeter.updateTableWriteQueryMeter(table);
  }
}
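
Note that the query meters are created in the same MetricRegistry that backs MetricsTableLatenciesImpl, which is what lets the getMetrics() override added earlier snapshot them alongside the latency histograms. The lookup, as the constructor above performs it:

MetricRegistry registry = MetricRegistries.global()
    .get(((MetricsTableLatenciesImpl) latencies).getMetricRegistryInfo()).get();
MetricsTableQueryMeter queryMeter = new MetricsTableQueryMeterImpl(registry);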