HBASE-25082: Per table WAL metrics: appendCount and appendSize (#2440)

Signed-off-by: Geoffrey Jacoby <gjacoby@apache.org>
Signed-off-by: Ankit Jain <jain.ankit@salesforce.com>
Signed-off-by: Duo Zhang <zhangduo@apache.org>
(cherry picked from commit 56c7505f8f)
Bharath Vissapragada 2020-09-23 21:06:57 -07:00
parent bffc8898c5
commit f52d64c4ee
GPG Key ID: 18AE42A0B5A93FA7
4 changed files with 81 additions and 21 deletions
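
In short: the WAL metrics source gains two lazily registered counters per table, keyed as "<table>.appendCount" and "<table>.appendSize", alongside the existing aggregate metrics, and MetricsWAL.postAppend now passes the table name taken from the WALKey. A minimal read-back sketch using the same registry calls the new test below exercises; the table name "orders" is hypothetical and not part of the patch:

    MetricsWALSourceImpl source = new MetricsWALSourceImpl();
    TableName table = TableName.valueOf("orders");       // hypothetical table
    source.incrementAppendCount(table);                  // lazily registers "orders.appendCount"
    source.incrementAppendSize(table, 128L);             // lazily registers "orders.appendSize"
    long count = source.getMetricsRegistry()
        .getCounter("orders." + MetricsWALSource.APPEND_COUNT, -1).value(); // 1
    long size = source.getMetricsRegistry()
        .getCounter("orders." + MetricsWALSource.APPEND_SIZE, -1).value();  // 128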

MetricsWALSource.java

@@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.regionserver.wal;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.metrics.BaseSource;
/**
@@ -77,7 +78,7 @@ public interface MetricsWALSource extends BaseSource {
/**
* Add the append size.
*/
void incrementAppendSize(long size);
void incrementAppendSize(TableName tableName, long size);
/**
* Add the time it took to append.
@@ -87,7 +88,7 @@ public interface MetricsWALSource extends BaseSource {
/**
* Increment the count of wal appends
*/
void incrementAppendCount();
void incrementAppendCount(TableName tableName);
/**
* Increment the number of appends that were slow
@@ -110,6 +111,4 @@ public interface MetricsWALSource extends BaseSource {
void incrementSizeLogRoll();
void incrementWrittenBytes(long val);
long getWrittenBytes();
}

MetricsWALSourceImpl.java

@@ -18,7 +18,10 @@
package org.apache.hadoop.hbase.regionserver.wal;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.lib.MutableFastCounter;
@@ -44,6 +47,9 @@ public class MetricsWALSourceImpl extends BaseSourceImpl implements MetricsWALSo
private final MutableFastCounter slowSyncRollRequested;
private final MutableFastCounter sizeRollRequested;
private final MutableFastCounter writtenBytes;
// Per table metrics.
private final ConcurrentMap<TableName, MutableFastCounter> perTableAppendCount;
private final ConcurrentMap<TableName, MutableFastCounter> perTableAppendSize;
public MetricsWALSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
@@ -72,12 +78,24 @@ public class MetricsWALSourceImpl extends BaseSourceImpl implements MetricsWALSo
.newCounter(SLOW_SYNC_ROLL_REQUESTED, SLOW_SYNC_ROLL_REQUESTED_DESC, 0L);
sizeRollRequested = this.getMetricsRegistry()
.newCounter(SIZE_ROLL_REQUESTED, SIZE_ROLL_REQUESTED_DESC, 0L);
writtenBytes = this.getMetricsRegistry().newCounter(WRITTEN_BYTES, WRITTEN_BYTES_DESC, 0l);
writtenBytes = this.getMetricsRegistry().newCounter(WRITTEN_BYTES, WRITTEN_BYTES_DESC, 0L);
perTableAppendCount = new ConcurrentHashMap<>();
perTableAppendSize = new ConcurrentHashMap<>();
}
@Override
public void incrementAppendSize(long size) {
public void incrementAppendSize(TableName tableName, long size) {
appendSizeHisto.add(size);
MutableFastCounter tableAppendSizeCounter = perTableAppendSize.get(tableName);
if (tableAppendSizeCounter == null) {
// putIfAbsent is atomic, so this pre-check is not needed for correctness; we keep it to
// avoid the expensive metric key construction on every append.
String metricsKey = String.format("%s.%s", tableName, APPEND_SIZE);
perTableAppendSize.putIfAbsent(
tableName, getMetricsRegistry().newCounter(metricsKey, APPEND_SIZE_DESC, 0L));
tableAppendSizeCounter = perTableAppendSize.get(tableName);
}
tableAppendSizeCounter.incr(size);
}
@Override
@@ -86,8 +104,16 @@ public class MetricsWALSourceImpl extends BaseSourceImpl implements MetricsWALSo
}
@Override
public void incrementAppendCount() {
public void incrementAppendCount(TableName tableName) {
appendCount.incr();
MutableFastCounter tableAppendCounter = perTableAppendCount.get(tableName);
if (tableAppendCounter == null) {
String metricsKey = String.format("%s.%s", tableName, APPEND_COUNT);
perTableAppendCount.putIfAbsent(
tableName, getMetricsRegistry().newCounter(metricsKey, APPEND_COUNT_DESC, 0L));
tableAppendCounter = perTableAppendCount.get(tableName);
}
tableAppendCounter.incr();
}
@Override
@@ -129,10 +155,4 @@ public class MetricsWALSourceImpl extends BaseSourceImpl implements MetricsWALSo
public void incrementWrittenBytes(long val) {
writtenBytes.incr(val);
}
@Override
public long getWrittenBytes() {
return writtenBytes.value();
}
}
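
Note on the lazy registration above: putIfAbsent already guarantees a single counter per table, so the null pre-check is purely an optimization that keeps the metric-key construction off the hot path. On a Java 8+ codebase the same pattern could be collapsed into computeIfAbsent (branch-1 likely avoids this because it still compiles as Java 7 source). A sketch of that variant, not part of the patch:

    @Override
    public void incrementAppendCount(TableName tableName) {
      appendCount.incr();
      // Builds the metric key and registers the counter only the first time a table is seen.
      perTableAppendCount.computeIfAbsent(tableName,
          tn -> getMetricsRegistry().newCounter(tn + "." + APPEND_COUNT, APPEND_COUNT_DESC, 0L))
          .incr();
    }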

MetricsWAL.java

@@ -26,6 +26,7 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.util.StringUtils;
@@ -57,9 +58,10 @@ public class MetricsWAL extends WALActionsListener.Base {
@Override
public void postAppend(final long size, final long time, final WALKey logkey,
final WALEdit logEdit) throws IOException {
source.incrementAppendCount();
TableName tableName = logkey.getTablename();
source.incrementAppendCount(tableName);
source.incrementAppendTime(time);
source.incrementAppendSize(size);
source.incrementAppendSize(tableName, size);
source.incrementWrittenBytes(size);
if (time > 1000) {

TestMetricsWAL.java

@@ -1,5 +1,4 @@
/**
*
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -20,11 +19,15 @@
package org.apache.hadoop.hbase.regionserver.wal;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
@@ -65,10 +68,46 @@ public class TestMetricsWAL {
public void testWalWrittenInBytes() throws Exception {
MetricsWALSource source = mock(MetricsWALSourceImpl.class);
MetricsWAL metricsWAL = new MetricsWAL(source);
metricsWAL.postAppend(100, 900, null, null);
metricsWAL.postAppend(200, 2000, null, null);
TableName tableName = TableName.valueOf("foo");
WALKey walKey = new WALKey(null, tableName, -1);
metricsWAL.postAppend(100, 900, walKey, null);
metricsWAL.postAppend(200, 2000, walKey, null);
verify(source, times(1)).incrementWrittenBytes(100);
verify(source, times(1)).incrementWrittenBytes(200);
}
@Test
public void testPerTableWALMetrics() throws Exception {
final MetricsWALSourceImpl source = new MetricsWALSourceImpl("foo", "foo", "foo", "foo");
final int numThreads = 10;
final int numIters = 10;
final CountDownLatch latch = new CountDownLatch(numThreads);
for (int i = 0; i < numThreads; i++) {
final TableName tableName = TableName.valueOf("tab_" + i);
final long size = i;
new Thread(new Runnable() {
@Override
public void run() {
for (int j = 0; j < numIters; j++) {
source.incrementAppendCount(tableName);
source.incrementAppendSize(tableName, size);
}
latch.countDown();
}
}).start();
}
// Wait for threads to finish.
latch.await();
DynamicMetricsRegistry registry = source.getMetricsRegistry();
// Validate the metrics
for (int i = 0; i < numThreads; i++) {
TableName tableName = TableName.valueOf("tab_" + i);
long tableAppendCount =
registry.getCounter(tableName + "." + MetricsWALSource.APPEND_COUNT, -1).value();
assertEquals(numIters, tableAppendCount);
long tableAppendSize =
registry.getCounter(tableName + "." + MetricsWALSource.APPEND_SIZE, -1).value();
assertEquals(i * numIters, tableAppendSize);
}
}
}