diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeter.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeter.java new file mode 100644 index 00000000000..d0085ffb4a8 --- /dev/null +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeter.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.TableName; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Query Per Second for each table in a RegionServer. 
+ */ +@InterfaceAudience.Private +public interface MetricsTableQueryMeter { + + /** + * Update table read QPS + * @param tableName The table the metric is for + * @param count Number of occurrences to record + */ + void updateTableReadQueryMeter(TableName tableName, long count); + + /** + * Update table read QPS + * @param tableName The table the metric is for + */ + void updateTableReadQueryMeter(TableName tableName); + + /** + * Update table write QPS + * @param tableName The table the metric is for + * @param count Number of occurrences to record + */ + void updateTableWriteQueryMeter(TableName tableName, long count); + + /** + * Update table write QPS + * @param tableName The table the metric is for + */ + void updateTableWriteQueryMeter(TableName tableName); +} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java index 980388f5479..5a3f3b9d249 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java @@ -20,6 +20,8 @@ import java.util.HashMap; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.MetricHistogram; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry; import org.apache.yetus.audience.InterfaceAudience; @@ -171,4 +173,15 @@ public class MetricsTableLatenciesImpl extends BaseSourceImpl implements Metrics public void updateScanTime(String tableName, long t) { getOrCreateTableHistogram(tableName).updateScanTime(t); } + + @Override + public void getMetrics(MetricsCollector metricsCollector, boolean all) { + MetricsRecordBuilder 
mrb = metricsCollector.addRecord(metricsName); + // source is registered in supers constructor, sometimes called before the whole initialization. + metricsRegistry.snapshot(mrb, all); + if (metricsAdapter != null) { + // snapshot MetricRegistry as well + metricsAdapter.snapshotAllMetrics(registry, mrb); + } + } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeterImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeterImpl.java new file mode 100644 index 00000000000..438310ff1d4 --- /dev/null +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeterImpl.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.metrics.Meter; +import org.apache.hadoop.hbase.metrics.MetricRegistry; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Implementation of {@link MetricsTableQueryMeter} to track query per second for each table in + * a RegionServer. 
+ */ +@InterfaceAudience.Private +public class MetricsTableQueryMeterImpl implements MetricsTableQueryMeter { + private final Map<TableName, TableMeters> metersByTable = new ConcurrentHashMap<>(); + private final MetricRegistry metricRegistry; + + private final static String TABLE_READ_QUERY_PER_SECOND = "tableReadQueryPerSecond"; + private final static String TABLE_WRITE_QUERY_PER_SECOND = "tableWriteQueryPerSecond"; + + public MetricsTableQueryMeterImpl(MetricRegistry metricRegistry) { + this.metricRegistry = metricRegistry; + } + + private static class TableMeters { + final Meter tableReadQueryMeter; + final Meter tableWriteQueryMeter; + + TableMeters(MetricRegistry metricRegistry, TableName tableName) { + this.tableReadQueryMeter = metricRegistry.meter(qualifyMetricsName(tableName, + TABLE_READ_QUERY_PER_SECOND)); + this.tableWriteQueryMeter = + metricRegistry.meter(qualifyMetricsName(tableName, TABLE_WRITE_QUERY_PER_SECOND)); + } + + public void updateTableReadQueryMeter(long count) { + tableReadQueryMeter.mark(count); + } + + public void updateTableReadQueryMeter() { + tableReadQueryMeter.mark(); + } + + public void updateTableWriteQueryMeter(long count) { + tableWriteQueryMeter.mark(count); + } + + public void updateTableWriteQueryMeter() { + tableWriteQueryMeter.mark(); + } + } + + private static String qualifyMetricsName(TableName tableName, String metric) { + StringBuilder sb = new StringBuilder(); + sb.append("Namespace_").append(tableName.getNamespaceAsString()); + sb.append("_table_").append(tableName.getQualifierAsString()); + sb.append("_metric_").append(metric); + return sb.toString(); + } + + private TableMeters getOrCreateTableMeter(TableName tableName) { + return metersByTable.computeIfAbsent(tableName, tbn -> new TableMeters(metricRegistry, tbn)); + } + + @Override + public void updateTableReadQueryMeter(TableName tableName, long count) { + getOrCreateTableMeter(tableName).updateTableReadQueryMeter(count); + } + + @Override + public void
updateTableReadQueryMeter(TableName tableName) { + getOrCreateTableMeter(tableName).updateTableReadQueryMeter(); + } + + @Override + public void updateTableWriteQueryMeter(TableName tableName, long count) { + getOrCreateTableMeter(tableName).updateTableWriteQueryMeter(count); + } + + @Override + public void updateTableWriteQueryMeter(TableName tableName) { + getOrCreateTableMeter(tableName).updateTableWriteQueryMeter(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index b27794e5bf4..a9ff44072f9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -4083,6 +4083,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi requestFlushIfNeeded(); } } finally { + if (rsServices != null && rsServices.getMetrics() != null) { + rsServices.getMetrics().updateWriteQueryMeter(this.htableDescriptor. + getTableName(), batchOp.size()); + } batchOp.closeRegionOperation(); } return batchOp.retCodeDetails; @@ -6613,6 +6617,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi if (!outResults.isEmpty()) { readRequestsCount.increment(); } + if (rsServices != null && rsServices.getMetrics() != null) { + rsServices.getMetrics().updateReadQueryMeter(getRegionInfo().getTable()); + } // If the size limit was reached it means a partial Result is being returned. Returning a // partial Result means that we should not reset the filters; filters should only be reset in @@ -7742,6 +7749,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // STEP 11. Release row lock(s) releaseRowLocks(acquiredRowLocks); + + if (rsServices != null && rsServices.getMetrics() != null) { + rsServices.getMetrics().updateWriteQueryMeter(this.htableDescriptor. 
+ getTableName(), mutations.size()); + } } success = true; } finally { @@ -7897,6 +7909,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi rsServices.getNonceManager().addMvccToOperationContext(nonceGroup, nonce, writeEntry.getWriteNumber()); } + if (rsServices != null && rsServices.getMetrics() != null) { + rsServices.getMetrics().updateWriteQueryMeter(this.htableDescriptor. + getTableName()); + } writeEntry = null; } finally { this.updatesLock.readLock().unlock(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java index 56135df253a..0738f5e9b2b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.metrics.Meter; import org.apache.hadoop.hbase.metrics.MetricRegistries; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.metrics.Timer; @@ -51,6 +52,8 @@ public class MetricsRegionServer { private MetricRegistry metricRegistry; private Timer bulkLoadTimer; + private Meter serverReadQueryMeter; + private Meter serverWriteQueryMeter; public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, Configuration conf, MetricsTable metricsTable) { @@ -68,6 +71,8 @@ public class MetricsRegionServer { bulkLoadTimer = metricRegistry.timer("Bulkload"); quotaSource = CompatibilitySingletonFactory.getInstance(MetricsRegionServerQuotaSource.class); + serverReadQueryMeter = metricRegistry.meter("ServerReadQueryPerSecond"); + serverWriteQueryMeter = metricRegistry.meter("ServerWriteQueryPerSecond"); } 
MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, @@ -248,4 +253,32 @@ public class MetricsRegionServer { public void incrementRegionSizeReportingChoreTime(long time) { quotaSource.incrementRegionSizeReportingChoreTime(time); } + + public void updateReadQueryMeter(TableName tn, long count) { + if (tableMetrics != null && tn != null) { + tableMetrics.updateTableReadQueryMeter(tn, count); + } + this.serverReadQueryMeter.mark(count); + } + + public void updateReadQueryMeter(TableName tn) { + if (tableMetrics != null && tn != null) { + tableMetrics.updateTableReadQueryMeter(tn); + } + this.serverReadQueryMeter.mark(); + } + + public void updateWriteQueryMeter(TableName tn, long count) { + if (tableMetrics != null && tn != null) { + tableMetrics.updateTableWriteQueryMeter(tn, count); + } + this.serverWriteQueryMeter.mark(count); + } + + public void updateWriteQueryMeter(TableName tn) { + if (tableMetrics != null && tn != null) { + tableMetrics.updateTableWriteQueryMeter(tn); + } + this.serverWriteQueryMeter.mark(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerTableMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerTableMetrics.java index 652a0620148..ec6c0493bb7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerTableMetrics.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerTableMetrics.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.metrics.MetricRegistries; import org.apache.yetus.audience.InterfaceAudience; /** @@ -28,9 +29,12 @@ import org.apache.yetus.audience.InterfaceAudience; public class RegionServerTableMetrics { private final MetricsTableLatencies latencies; + private final MetricsTableQueryMeter queryMeter; public 
RegionServerTableMetrics() { latencies = CompatibilitySingletonFactory.getInstance(MetricsTableLatencies.class); + queryMeter = new MetricsTableQueryMeterImpl(MetricRegistries.global(). + get(((MetricsTableLatenciesImpl) latencies).getMetricRegistryInfo()).get()); } public void updatePut(TableName table, long time) { @@ -68,4 +72,20 @@ public class RegionServerTableMetrics { public void updateScanSize(TableName table, long size) { latencies.updateScanSize(table.getNameAsString(), size); } + + public void updateTableReadQueryMeter(TableName table, long count) { + queryMeter.updateTableReadQueryMeter(table, count); + } + + public void updateTableReadQueryMeter(TableName table) { + queryMeter.updateTableReadQueryMeter(table); + } + + public void updateTableWriteQueryMeter(TableName table, long count) { + queryMeter.updateTableWriteQueryMeter(table, count); + } + + public void updateTableWriteQueryMeter(TableName table) { + queryMeter.updateTableWriteQueryMeter(table); + } }