HBASE-19285 Implements table-level latency histograms

For a regionserver's view of a table (the regions
of that table that are hosted on the regionserver),
this change tracks the latencies of the operations
that touch those regions.

Tracking at the per-table level avoids the memory bloat
and performance impact that accompanied the previous
per-region latency metrics while still providing important
details for operators to consume.
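
As a sketch of the new surface (the class, method, and metric-name shape
below are taken from this patch; the table name is illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.regionserver.RegionServerTableMetrics;

    // Fragment: record a 12ms Get against table "t1". The sample lands in a
    // time histogram named Namespace_default_table_t1_metric_getTime under
    // the JMX context RegionServer,sub=TableLatencies.
    RegionServerTableMetrics metrics = new RegionServerTableMetrics();
    metrics.updateGet(TableName.valueOf("t1"), 12L);

Per-table tracking is on by default and can be disabled by setting
hbase.regionserver.enable.table.latencies to false.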

Signed-off-by: Andrew Purtell <apurtell@apache.org>
Josh Elser 2017-11-17 13:39:43 -05:00
parent 680a534acd
commit 835d15bf97
11 changed files with 575 additions and 42 deletions

View File

@@ -0,0 +1,125 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
/**
* Latency metrics for each table hosted by a RegionServer.
*/
public interface MetricsTableLatencies {
/**
* The name of the metrics.
*/
String METRICS_NAME = "TableLatencies";
/**
* The name of the metrics context that metrics will be under.
*/
String METRICS_CONTEXT = "regionserver";
/**
* A description of the metrics this source exports.
*/
String METRICS_DESCRIPTION = "Metrics about Tables on a single HBase RegionServer";
/**
* The name of the JMX context that metrics will be under.
*/
String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
String GET_TIME = "getTime";
String SCAN_TIME = "scanTime";
String SCAN_SIZE = "scanSize";
String PUT_TIME = "putTime";
String PUT_BATCH_TIME = "putBatchTime";
String DELETE_TIME = "deleteTime";
String DELETE_BATCH_TIME = "deleteBatchTime";
String INCREMENT_TIME = "incrementTime";
String APPEND_TIME = "appendTime";
/**
* Update the Put time histogram.
*
* @param tableName The table the metric is for
* @param t time it took
*/
void updatePut(String tableName, long t);
/**
* Update the batch Put time histogram.
*
* @param tableName The table the metric is for
* @param t time it took
*/
void updatePutBatch(String tableName, long t);
/**
* Update the Delete time histogram.
*
* @param tableName The table the metric is for
* @param t time it took
*/
void updateDelete(String tableName, long t);
/**
* Update the batch Delete time histogram.
*
* @param tableName The table the metric is for
* @param t time it took
*/
void updateDeleteBatch(String tableName, long t);
/**
* Update the Get time histogram.
*
* @param tableName The table the metric is for
* @param t time it took
*/
void updateGet(String tableName, long t);
/**
* Update the Increment time histogram.
*
* @param tableName The table the metric is for
* @param t time it took
*/
void updateIncrement(String tableName, long t);
/**
* Update the Append time histogram.
*
* @param tableName The table the metric is for
* @param t time it took
*/
void updateAppend(String tableName, long t);
/**
* Update the scan size.
*
* @param tableName The table the metric is for
* @param scanSize size of the scan
*/
void updateScanSize(String tableName, long scanSize);
/**
* Update the scan time.
*
* @param tableName The table the metric is for
* @param t time it took
*/
void updateScanTime(String tableName, long t);
}

View File

@@ -168,4 +168,11 @@ public interface MetricsAssertHelper {
* @return long value of the gauge.
*/
long getGaugeLong(String name, BaseSource source);
/**
* Generates a representation of all metrics exported by the given {@code source}.
* @param source The {@link BaseSource} that will provide the metrics.
* @return A representation of the metrics as a String.
*/
String toDebugString(BaseSource source);
}

View File

@@ -0,0 +1,175 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
import org.apache.yetus.audience.InterfaceAudience;
import com.google.common.annotations.VisibleForTesting;
/**
* Implementation of {@link MetricsTableLatencies} that tracks latencies for each table hosted
* by a RegionServer.
*/
@InterfaceAudience.Private
public class MetricsTableLatenciesImpl extends BaseSourceImpl implements MetricsTableLatencies {
private final ConcurrentHashMap<TableName, TableHistograms> histogramsByTable = new ConcurrentHashMap<>();
@VisibleForTesting
public static class TableHistograms {
final MetricHistogram getTimeHisto;
final MetricHistogram incrementTimeHisto;
final MetricHistogram appendTimeHisto;
final MetricHistogram putTimeHisto;
final MetricHistogram putBatchTimeHisto;
final MetricHistogram deleteTimeHisto;
final MetricHistogram deleteBatchTimeHisto;
final MetricHistogram scanTimeHisto;
final MetricHistogram scanSizeHisto;
TableHistograms(DynamicMetricsRegistry registry, TableName tn) {
getTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, GET_TIME));
incrementTimeHisto = registry.newTimeHistogram(
qualifyMetricsName(tn, INCREMENT_TIME));
appendTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, APPEND_TIME));
putTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, PUT_TIME));
putBatchTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, PUT_BATCH_TIME));
deleteTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, DELETE_TIME));
deleteBatchTimeHisto = registry.newTimeHistogram(
qualifyMetricsName(tn, DELETE_BATCH_TIME));
scanTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, SCAN_TIME));
scanSizeHisto = registry.newSizeHistogram(qualifyMetricsName(tn, SCAN_SIZE));
}
public void updatePut(long time) {
putTimeHisto.add(time);
}
public void updatePutBatch(long time) {
putBatchTimeHisto.add(time);
}
public void updateDelete(long t) {
deleteTimeHisto.add(t);
}
public void updateDeleteBatch(long t) {
deleteBatchTimeHisto.add(t);
}
public void updateGet(long t) {
getTimeHisto.add(t);
}
public void updateIncrement(long t) {
incrementTimeHisto.add(t);
}
public void updateAppend(long t) {
appendTimeHisto.add(t);
}
public void updateScanSize(long scanSize) {
scanSizeHisto.add(scanSize);
}
public void updateScanTime(long t) {
scanTimeHisto.add(t);
}
}
@VisibleForTesting
public static String qualifyMetricsName(TableName tableName, String metric) {
StringBuilder sb = new StringBuilder();
sb.append("Namespace_").append(tableName.getNamespaceAsString());
sb.append("_table_").append(tableName.getQualifierAsString());
sb.append("_metric_").append(metric);
return sb.toString();
}
@VisibleForTesting
public TableHistograms getOrCreateTableHistogram(String tableName) {
// TODO Java8's ConcurrentHashMap#computeIfAbsent would be stellar instead
final TableName tn = TableName.valueOf(tableName);
TableHistograms latency = histogramsByTable.get(tn);
if (latency == null) {
latency = new TableHistograms(getMetricsRegistry(), tn);
// Concurrent RPC handlers may race here; keep whichever instance was registered first.
TableHistograms prior = histogramsByTable.putIfAbsent(tn, latency);
if (prior != null) {
latency = prior;
}
}
return latency;
}
public MetricsTableLatenciesImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
}
public MetricsTableLatenciesImpl(String metricsName, String metricsDescription,
String metricsContext, String metricsJmxContext) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
}
@Override
public void updatePut(String tableName, long t) {
getOrCreateTableHistogram(tableName).updatePut(t);
}
@Override
public void updatePutBatch(String tableName, long t) {
getOrCreateTableHistogram(tableName).updatePutBatch(t);
}
@Override
public void updateDelete(String tableName, long t) {
getOrCreateTableHistogram(tableName).updateDelete(t);
}
@Override
public void updateDeleteBatch(String tableName, long t) {
getOrCreateTableHistogram(tableName).updateDeleteBatch(t);
}
@Override
public void updateGet(String tableName, long t) {
getOrCreateTableHistogram(tableName).updateGet(t);
}
@Override
public void updateIncrement(String tableName, long t) {
getOrCreateTableHistogram(tableName).updateIncrement(t);
}
@Override
public void updateAppend(String tableName, long t) {
getOrCreateTableHistogram(tableName).updateAppend(t);
}
@Override
public void updateScanSize(String tableName, long scanSize) {
getOrCreateTableHistogram(tableName).updateScanSize(scanSize);
}
@Override
public void updateScanTime(String tableName, long t) {
getOrCreateTableHistogram(tableName).updateScanTime(t);
}
}
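
To make the naming scheme concrete, a minimal sketch of qualifyMetricsName
(the namespace and table are illustrative):

    TableName tn = TableName.valueOf("ns1", "orders");
    String name = MetricsTableLatenciesImpl.qualifyMetricsName(tn,
        MetricsTableLatencies.GET_TIME);
    // name is "Namespace_ns1_table_orders_metric_getTime"; the registered time
    // histogram exports its gauges under this prefix, e.g.
    // Namespace_ns1_table_orders_metric_getTime_99th_percentile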

View File

@@ -0,0 +1,17 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
org.apache.hadoop.hbase.regionserver.MetricsTableLatenciesImpl
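
(This is the ServiceLoader registration, presumably at
META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsTableLatencies,
that lets CompatibilitySingletonFactory.getInstance(MetricsTableLatencies.class)
locate this implementation at runtime.)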

View File

@@ -228,6 +228,14 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
return gauges.get(cName).longValue();
}
@Override
public String toDebugString(BaseSource source) {
getMetrics(source);
StringBuilder sb = new StringBuilder();
sb.append("Tags=").append(tags).append(", Counters=").append(counters);
return sb.append(", Gauges=").append(gauges).toString();
}
private void reset() {
tags.clear();
gauges.clear();

View File

@@ -1516,7 +1516,8 @@ public class HRegionServer extends HasThread implements
// This call sets up an initialized replication and WAL. Later we start it up.
setupWALAndReplication();
// Init in here rather than in constructor after thread name has been set
this.metricsRegionServer = new MetricsRegionServer(new MetricsRegionServerWrapperImpl(this));
this.metricsRegionServer = new MetricsRegionServer(
new MetricsRegionServerWrapperImpl(this), conf);
this.metricsTable = new MetricsTable(new MetricsTableWrapperAggregateImpl(this));
// Now that we have a metrics source, start the pause monitor
this.pauseMonitor = new JvmPauseMonitor(conf, getMetrics().getMetricsSource());

View File

@@ -18,11 +18,13 @@
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.metrics.MetricRegistries;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.metrics.Timer;
import org.apache.hadoop.conf.Configuration;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
@@ -37,16 +39,22 @@ import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
@InterfaceStability.Evolving
@InterfaceAudience.Private
public class MetricsRegionServer {
public static final String RS_ENABLE_TABLE_METRICS_KEY =
"hbase.regionserver.enable.table.latencies";
public static final boolean RS_ENABLE_TABLE_METRICS_DEFAULT = true;
private MetricsRegionServerSource serverSource;
private MetricsRegionServerWrapper regionServerWrapper;
private RegionServerTableMetrics tableMetrics;
private MetricRegistry metricRegistry;
private Timer bulkLoadTimer;
public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper) {
public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, Configuration conf) {
this(regionServerWrapper,
CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)
.createServer(regionServerWrapper));
.createServer(regionServerWrapper),
createTableMetrics(conf));
// Create hbase-metrics module based metrics. The registry should already be registered by the
// MetricsRegionServerSource
@@ -57,9 +65,21 @@ public class MetricsRegionServer {
}
MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper,
MetricsRegionServerSource serverSource) {
MetricsRegionServerSource serverSource,
RegionServerTableMetrics tableMetrics) {
this.regionServerWrapper = regionServerWrapper;
this.serverSource = serverSource;
this.tableMetrics = tableMetrics;
}
/**
* Creates an instance of {@link RegionServerTableMetrics} only if the feature is enabled.
*/
static RegionServerTableMetrics createTableMetrics(Configuration conf) {
if (conf.getBoolean(RS_ENABLE_TABLE_METRICS_KEY, RS_ENABLE_TABLE_METRICS_DEFAULT)) {
return new RegionServerTableMetrics();
}
return null;
}
@VisibleForTesting
@@ -71,22 +91,34 @@ public class MetricsRegionServer {
return regionServerWrapper;
}
public void updatePutBatch(long t) {
public void updatePutBatch(TableName tn, long t) {
if (tableMetrics != null && tn != null) {
tableMetrics.updatePutBatch(tn, t);
}
if (t > 1000) {
serverSource.incrSlowPut();
}
serverSource.updatePutBatch(t);
}
public void updatePut(long t) {
public void updatePut(TableName tn, long t) {
if (tableMetrics != null && tn != null) {
tableMetrics.updatePut(tn, t);
}
serverSource.updatePut(t);
}
public void updateDelete(long t) {
public void updateDelete(TableName tn, long t) {
if (tableMetrics != null && tn != null) {
tableMetrics.updateDelete(tn, t);
}
serverSource.updateDelete(t);
}
public void updateDeleteBatch(long t) {
public void updateDeleteBatch(TableName tn, long t) {
if (tableMetrics != null && tn != null) {
tableMetrics.updateDeleteBatch(tn, t);
}
if (t > 1000) {
serverSource.incrSlowDelete();
}
@@ -101,21 +133,30 @@ public class MetricsRegionServer {
serverSource.updateCheckAndPut(t);
}
public void updateGet(long t) {
public void updateGet(TableName tn, long t) {
if (tableMetrics != null && tn != null) {
tableMetrics.updateGet(tn, t);
}
if (t > 1000) {
serverSource.incrSlowGet();
}
serverSource.updateGet(t);
}
public void updateIncrement(long t) {
public void updateIncrement(TableName tn, long t) {
if (tableMetrics != null && tn != null) {
tableMetrics.updateIncrement(tn, t);
}
if (t > 1000) {
serverSource.incrSlowIncrement();
}
serverSource.updateIncrement(t);
}
public void updateAppend(long t) {
public void updateAppend(TableName tn, long t) {
if (tableMetrics != null && tn != null) {
tableMetrics.updateAppend(tn, t);
}
if (t > 1000) {
serverSource.incrSlowAppend();
}
@@ -126,11 +167,17 @@ public class MetricsRegionServer {
serverSource.updateReplay(t);
}
public void updateScanSize(long scanSize){
public void updateScanSize(TableName tn, long scanSize){
if (tableMetrics != null && tn != null) {
tableMetrics.updateScanSize(tn, scanSize);
}
serverSource.updateScanSize(scanSize);
}
public void updateScanTime(long t) {
public void updateScanTime(TableName tn, long t) {
if (tableMetrics != null && tn != null) {
tableMetrics.updateScanTime(tn, t);
}
serverSource.updateScanTime(t);
}
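
Note the guard pattern above: tableMetrics is null when table latencies are
disabled, and tn is null on call sites that cannot resolve a region, so the
per-table update is skipped in either case while the server-wide histograms
still receive the sample. A minimal sketch of disabling the feature (the
wrapper variable is assumed, as in the test change below):

    Configuration conf = new Configuration(false);
    conf.setBoolean(MetricsRegionServer.RS_ENABLE_TABLE_METRICS_KEY, false);
    // createTableMetrics(conf) returns null; no per-table histograms are registered.
    MetricsRegionServer rsm = new MetricsRegionServer(wrapper, conf);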

View File

@@ -660,6 +660,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
}
if (regionServer.metricsRegionServer != null) {
regionServer.metricsRegionServer.updateAppend(
region.getTableDescriptor().getTableName(),
EnvironmentEdgeManager.currentTime() - before);
}
return r;
@@ -711,7 +712,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
}
if (regionServer.metricsRegionServer != null) {
regionServer.metricsRegionServer.updateIncrement(
EnvironmentEdgeManager.currentTime() - before);
region.getTableDescriptor().getTableName(),
EnvironmentEdgeManager.currentTime() - before);
}
return r;
}
@@ -795,7 +797,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
} finally {
if (regionServer.metricsRegionServer != null) {
regionServer.metricsRegionServer.updateGet(
EnvironmentEdgeManager.currentTime() - before);
region.getTableDescriptor().getTableName(),
EnvironmentEdgeManager.currentTime() - before);
}
}
} else if (action.hasServiceCall()) {
@@ -1005,10 +1008,12 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
if (regionServer.metricsRegionServer != null) {
long after = EnvironmentEdgeManager.currentTime();
if (batchContainsPuts) {
regionServer.metricsRegionServer.updatePutBatch(after - before);
regionServer.metricsRegionServer.updatePutBatch(
region.getTableDescriptor().getTableName(), after - before);
}
if (batchContainsDelete) {
regionServer.metricsRegionServer.updateDeleteBatch(after - before);
regionServer.metricsRegionServer.updateDeleteBatch(
region.getTableDescriptor().getTableName(), after - before);
}
}
}
@@ -1079,11 +1084,13 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
} finally {
if (regionServer.metricsRegionServer != null) {
long after = EnvironmentEdgeManager.currentTime();
if (batchContainsPuts) {
regionServer.metricsRegionServer.updatePutBatch(after - before);
if (batchContainsPuts) {
regionServer.metricsRegionServer.updatePutBatch(
region.getTableDescriptor().getTableName(), after - before);
}
if (batchContainsDelete) {
regionServer.metricsRegionServer.updateDeleteBatch(after - before);
regionServer.metricsRegionServer.updateDeleteBatch(
region.getTableDescriptor().getTableName(), after - before);
}
}
}
@@ -2341,11 +2348,12 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
final GetRequest request) throws ServiceException {
long before = EnvironmentEdgeManager.currentTime();
OperationQuota quota = null;
HRegion region = null;
try {
checkOpen();
requestCount.increment();
rpcGetRequestCount.increment();
HRegion region = getRegion(request.getRegion());
region = getRegion(request.getRegion());
GetResponse.Builder builder = GetResponse.newBuilder();
ClientProtos.Get get = request.getGet();
@@ -2398,7 +2406,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
throw new ServiceException(ie);
} finally {
if (regionServer.metricsRegionServer != null) {
regionServer.metricsRegionServer.updateGet(EnvironmentEdgeManager.currentTime() - before);
regionServer.metricsRegionServer.updateGet(
region == null ? null : region.getTableDescriptor().getTableName(),
EnvironmentEdgeManager.currentTime() - before);
}
if (quota != null) {
quota.close();
@@ -2637,6 +2646,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
RpcCallContext context = RpcServer.getCurrentCall().orElse(null);
ActivePolicyEnforcement spaceQuotaEnforcement = null;
MutationType type = null;
HRegion region = null;
long before = EnvironmentEdgeManager.currentTime();
// Clear scanner so we are not holding on to reference across call.
if (controller != null) {
@@ -2646,7 +2656,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
checkOpen();
requestCount.increment();
rpcMutateRequestCount.increment();
HRegion region = getRegion(request.getRegion());
region = getRegion(request.getRegion());
MutateResponse.Builder builder = MutateResponse.newBuilder();
MutationProto mutation = request.getMutation();
if (!region.getRegionInfo().isMetaRegion()) {
@@ -2759,14 +2769,16 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
if (request.hasCondition()) {
regionServer.metricsRegionServer.updateCheckAndDelete(after - before);
} else {
regionServer.metricsRegionServer.updateDelete(after - before);
regionServer.metricsRegionServer.updateDelete(
region == null ? null : region.getRegionInfo().getTable(), after - before);
}
break;
case PUT:
if (request.hasCondition()) {
regionServer.metricsRegionServer.updateCheckAndPut(after - before);
} else {
regionServer.metricsRegionServer.updatePut(after - before);
regionServer.metricsRegionServer.updatePut(
region == null ? null : region.getRegionInfo().getTable(), after - before);
}
break;
default:
@@ -3094,8 +3106,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
long responseCellSize = context != null ? context.getResponseCellSize() : 0;
region.getMetrics().updateScanTime(end - before);
if (regionServer.metricsRegionServer != null) {
regionServer.metricsRegionServer.updateScanSize(responseCellSize);
regionServer.metricsRegionServer.updateScanTime(end - before);
regionServer.metricsRegionServer.updateScanSize(
region.getTableDescriptor().getTableName(), responseCellSize);
regionServer.metricsRegionServer.updateScanTime(
region.getTableDescriptor().getTableName(), end - before);
}
} finally {
region.closeRegionOperation();

View File

@@ -0,0 +1,71 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.TableName;
import org.apache.yetus.audience.InterfaceAudience;
/**
* Captures operation metrics by table. Keeps table-level metrics collection separate from
* {@link MetricsRegionServer} for encapsulation and ease of testing.
*/
@InterfaceAudience.Private
public class RegionServerTableMetrics {
private final MetricsTableLatencies latencies;
public RegionServerTableMetrics() {
latencies = CompatibilitySingletonFactory.getInstance(MetricsTableLatencies.class);
}
public void updatePut(TableName table, long time) {
latencies.updatePut(table.getNameAsString(), time);
}
public void updatePutBatch(TableName table, long time) {
latencies.updatePutBatch(table.getNameAsString(), time);
}
public void updateGet(TableName table, long time) {
latencies.updateGet(table.getNameAsString(), time);
}
public void updateIncrement(TableName table, long time) {
latencies.updateIncrement(table.getNameAsString(), time);
}
public void updateAppend(TableName table, long time) {
latencies.updateAppend(table.getNameAsString(), time);
}
public void updateDelete(TableName table, long time) {
latencies.updateDelete(table.getNameAsString(), time);
}
public void updateDeleteBatch(TableName table, long time) {
latencies.updateDeleteBatch(table.getNameAsString(), time);
}
public void updateScanTime(TableName table, long time) {
latencies.updateScanTime(table.getNameAsString(), time);
}
public void updateScanSize(TableName table, long size) {
latencies.updateScanSize(table.getNameAsString(), size);
}
}

View File

@@ -50,7 +50,7 @@ public class TestMetricsRegionServer {
@Before
public void setUp() {
wrapper = new MetricsRegionServerWrapperStub();
rsm = new MetricsRegionServer(wrapper);
rsm = new MetricsRegionServer(wrapper, new Configuration(false));
serverSource = rsm.getMetricsSource();
}
@@ -119,29 +119,29 @@ public class TestMetricsRegionServer {
@Test
public void testSlowCount() {
for (int i=0; i < 12; i ++) {
rsm.updateAppend(12);
rsm.updateAppend(1002);
rsm.updateAppend(null, 12);
rsm.updateAppend(null, 1002);
}
for (int i=0; i < 13; i ++) {
rsm.updateDeleteBatch(13);
rsm.updateDeleteBatch(1003);
rsm.updateDeleteBatch(null, 13);
rsm.updateDeleteBatch(null, 1003);
}
for (int i=0; i < 14; i ++) {
rsm.updateGet(14);
rsm.updateGet(1004);
rsm.updateGet(null, 14);
rsm.updateGet(null, 1004);
}
for (int i=0; i < 15; i ++) {
rsm.updateIncrement(15);
rsm.updateIncrement(1005);
rsm.updateIncrement(null, 15);
rsm.updateIncrement(null, 1005);
}
for (int i=0; i < 16; i ++) {
rsm.updatePutBatch(16);
rsm.updatePutBatch(1006);
rsm.updatePutBatch(null, 16);
rsm.updatePutBatch(null, 1006);
}
for (int i=0; i < 17; i ++) {
rsm.updatePut(17);
rsm.updateDelete(17);
rsm.updatePut(null, 17);
rsm.updateDelete(null, 17);
rsm.updateCheckAndDelete(17);
rsm.updateCheckAndPut(17);
}

View File

@@ -0,0 +1,68 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import org.apache.hadoop.hbase.CompatibilityFactory;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.test.MetricsAssertHelper;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@Category({RegionServerTests.class, SmallTests.class})
public class TestMetricsTableLatencies {
public static MetricsAssertHelper HELPER =
CompatibilityFactory.getInstance(MetricsAssertHelper.class);
@Test
public void testTableWrapperAggregateMetrics() throws IOException {
TableName tn1 = TableName.valueOf("table1");
TableName tn2 = TableName.valueOf("table2");
MetricsTableLatencies latencies = CompatibilitySingletonFactory.getInstance(
MetricsTableLatencies.class);
assertTrue("'latencies' is actually " + latencies.getClass(),
latencies instanceof MetricsTableLatenciesImpl);
MetricsTableLatenciesImpl latenciesImpl = (MetricsTableLatenciesImpl) latencies;
RegionServerTableMetrics tableMetrics = new RegionServerTableMetrics();
// Metrics for each table should be disjoint
// N.B. each call to assertGauge removes all previously acquired metrics so we have to
// make the metrics call and then immediately verify it. Trying to do multiple metrics
// updates followed by multiple verifications will fail on the 2nd verification (as the
// first verification cleaned the data structures in MetricsAssertHelperImpl).
tableMetrics.updateGet(tn1, 500L);
HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName(
tn1, MetricsTableLatencies.GET_TIME + "_" + "999th_percentile"), 500L, latenciesImpl);
tableMetrics.updatePut(tn1, 50L);
HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName(
tn1, MetricsTableLatencies.PUT_TIME + "_" + "99th_percentile"), 50L, latenciesImpl);
tableMetrics.updateGet(tn2, 300L);
HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName(
tn2, MetricsTableLatencies.GET_TIME + "_" + "999th_percentile"), 300L, latenciesImpl);
tableMetrics.updatePut(tn2, 75L);
HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName(
tn2, MetricsTableLatencies.PUT_TIME + "_" + "99th_percentile"), 75L, latenciesImpl);
}
}
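
N.B. the "999th_percentile" literal in the assertions above relies on
MetricsAssertHelperImpl canonicalizing metric names (non-alphanumeric
characters are stripped before comparison), which lets it match the
"99.9th_percentile" gauge that the time histograms export; if that helper
behavior changes, these suffixes would need the dot.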