[jira] [HBASE-4809] Per-CF set RPC metrics

Summary: Port from 0.89-fb the RPC metrics that are incremented for the set of
column families touched by each request.

Test Plan:
Unit tests, 5-node cluster. Observe metrics through JMX.
The current version of the patch compiles, but no testing has been done yet.
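
For reference, a minimal sketch (not part of the patch; the driver class, table
name, and CF names are made up) of how the new per-CF-set metric keys are
composed and updated, mirroring what doMiniBatchPut does in HRegion below:

    import java.util.Set;
    import java.util.TreeSet;

    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical driver class, for illustration only.
    public class MetricPrefixExample {
      public static void main(String[] args) {
        // Include table names in metric keys (test-only switch used elsewhere
        // in this patch; in production this comes from the
        // hbase.metrics.showTableName configuration option).
        SchemaMetrics.setUseTableNameInTest(true);

        // The set of column families touched by the request.
        Set<byte[]> families = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
        families.add(Bytes.toBytes("cf1"));
        families.add(Bytes.toBytes("cf2"));

        // Sorted family names are joined with '~', yielding the prefix
        // "tbl.mytable.cf.cf1~cf2."
        String prefix =
            SchemaMetrics.generateSchemaMetricsPrefix("mytable", families);

        // Record a 42 ms multiput under "tbl.mytable.cf.cf1~cf2.multiput_".
        HRegion.incrTimeVaryingMetric(prefix + "multiput_", 42);
      }
    }

When the CF set is inconsistent across a batch, doMiniBatchPut instead falls
back to the SchemaMetrics.UNKNOWN / CF_BAD_FAMILY_PREFIX keys, as shown in the
HRegion hunks below.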

Reviewers: nspiegelberg, JIRA, Kannan, Karthik

Reviewed By: nspiegelberg

CC: nspiegelberg, mbautin

Differential Revision: 483

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1204799 13f79535-47bb-0310-9956-ffa450edef68
Nicolas Spiegelberg 2011-11-22 03:48:58 +00:00
parent a8ea368223
commit 4a2bea25be
8 changed files with 284 additions and 48 deletions

File: org/apache/hadoop/hbase/regionserver/HRegion.java

@@ -62,13 +62,13 @@ import org.apache.hadoop.hbase.DroppedSnapshotException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
@@ -80,10 +80,10 @@ import org.apache.hadoop.hbase.client.RowLock;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.Exec;
import org.apache.hadoop.hbase.client.coprocessor.ExecResult;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
import org.apache.hadoop.hbase.filter.WritableByteArrayComparable;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
@@ -93,6 +93,7 @@ import org.apache.hadoop.hbase.ipc.HBaseRPC;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@@ -311,9 +312,13 @@ public class HRegion implements HeapSize { // , Writable{
// These ones are not reset to zero when queried, unlike the previous.
public static final ConcurrentMap<String, AtomicLong> numericPersistentMetrics = new ConcurrentHashMap<String, AtomicLong>();
// Used for metrics where we want track a metrics (such as latency)
// over a number of operations.
public static final ConcurrentMap<String, Pair<AtomicLong, AtomicInteger>> timeVaryingMetrics = new ConcurrentHashMap<String, Pair<AtomicLong, AtomicInteger>>();
/**
* Used for metrics where we want to track a metric (such as latency) over a
* number of operations.
*/
public static final ConcurrentMap<String, Pair<AtomicLong, AtomicInteger>>
timeVaryingMetrics = new ConcurrentHashMap<String,
Pair<AtomicLong, AtomicInteger>>();
public static void incrNumericMetric(String key, long amount) {
AtomicLong oldVal = numericMetrics.get(key);
@@ -1231,7 +1236,8 @@ public class HRegion implements HeapSize { // , Writable{
* @throws DroppedSnapshotException Thrown when replay of hlog is required
* because a Snapshot was not properly persisted.
*/
protected boolean internalFlushcache(MonitoredTask status) throws IOException {
protected boolean internalFlushcache(MonitoredTask status)
throws IOException {
return internalFlushcache(this.log, -1, status);
}
@@ -1666,6 +1672,15 @@ public class HRegion implements HeapSize { // , Writable{
} finally {
this.updatesLock.readLock().unlock();
}
// do after lock
final long after = EnvironmentEdgeManager.currentTimeMillis();
final String metricPrefix = SchemaMetrics.generateSchemaMetricsPrefix(
getTableDesc().getNameAsString(), familyMap.keySet());
if (!metricPrefix.isEmpty()) {
HRegion.incrTimeVaryingMetric(metricPrefix + "delete_", after - now);
}
if (flush) {
// Request a cache flush. Do it outside update lock.
requestFlush();
@@ -1811,6 +1826,12 @@ public class HRegion implements HeapSize { // , Writable{
@SuppressWarnings("unchecked")
private long doMiniBatchPut(
BatchOperationInProgress<Pair<Put, Integer>> batchOp) throws IOException {
String metricPrefix = null;
final String tableName = getTableDesc().getNameAsString();
// variable to note if all Put items are for the same CF -- metrics related
boolean cfSetConsistent = true;
long startTimeMs = EnvironmentEdgeManager.currentTimeMillis();
WALEdit walEdit = new WALEdit();
/* Run coprocessor pre hook outside of locks to avoid deadlock */
@@ -1887,6 +1908,21 @@ public class HRegion implements HeapSize { // , Writable{
}
lastIndexExclusive++;
numReadyToWrite++;
// If first time around, designate a prefix for metrics based on the CF
// set. After that, watch for inconsistencies.
final String curMetricPrefix =
SchemaMetrics.generateSchemaMetricsPrefix(tableName,
put.getFamilyMap().keySet());
if (metricPrefix == null) {
metricPrefix = curMetricPrefix;
} else if (cfSetConsistent && !metricPrefix.equals(curMetricPrefix)) {
// The column family set for this batch put is undefined.
cfSetConsistent = false;
metricPrefix = SchemaMetrics.generateSchemaMetricsPrefix(tableName,
SchemaMetrics.UNKNOWN);
}
}
// we should record the timestamp only after we have acquired the rowLock,
@@ -2027,6 +2063,15 @@ public class HRegion implements HeapSize { // , Writable{
releaseRowLock(toRelease);
}
}
// do after lock
final long endTimeMs = EnvironmentEdgeManager.currentTimeMillis();
if (metricPrefix == null) {
metricPrefix = SchemaMetrics.CF_BAD_FAMILY_PREFIX;
}
HRegion.incrTimeVaryingMetric(metricPrefix + "multiput_",
endTimeMs - startTimeMs);
if (!success) {
for (int i = firstIndex; i < lastIndexExclusive; i++) {
if (batchOp.retCodeDetails[i].getOperationStatusCode() == OperationStatusCode.NOT_RUN) {
@@ -2275,6 +2320,14 @@ public class HRegion implements HeapSize { // , Writable{
coprocessorHost.postPut(put, walEdit, writeToWAL);
}
// do after lock
final long after = EnvironmentEdgeManager.currentTimeMillis();
final String metricPrefix = SchemaMetrics.generateSchemaMetricsPrefix(
this.getTableDesc().getNameAsString(), familyMap.keySet());
if (!metricPrefix.isEmpty()) {
HRegion.incrTimeVaryingMetric(metricPrefix + "put_", after - now);
}
if (flush) {
// Request a cache flush. Do it outside update lock.
requestFlush();
@@ -3045,7 +3098,7 @@ public class HRegion implements HeapSize { // , Writable{
}
@Override
public synchronized boolean next(List<KeyValue> outResults, int limit)
throws IOException {
if (this.filterClosed) {
throw new UnknownScannerException("Scanner was closed (timed out?) " +
@@ -3075,7 +3128,7 @@ public class HRegion implements HeapSize { // , Writable{
}
@Override
public synchronized boolean next(List<KeyValue> outResults)
throws IOException {
// apply the batching limit by default
return next(outResults, batch);
@@ -3172,7 +3225,7 @@ public class HRegion implements HeapSize { // , Writable{
}
@Override
public synchronized void close() {
if (storeHeap != null) {
storeHeap.close();
storeHeap = null;
@@ -3839,6 +3892,7 @@ public class HRegion implements HeapSize { // , Writable{
*/
private List<KeyValue> get(Get get, boolean withCoprocessor)
throws IOException {
long now = EnvironmentEdgeManager.currentTimeMillis();
Scan scan = new Scan(get);
List<KeyValue> results = new ArrayList<KeyValue>();
@@ -3864,6 +3918,14 @@ public class HRegion implements HeapSize { // , Writable{
coprocessorHost.postGet(get, results);
}
// do after lock
final long after = EnvironmentEdgeManager.currentTimeMillis();
final String metricPrefix = SchemaMetrics.generateSchemaMetricsPrefix(
this.getTableDesc().getNameAsString(), get.familySet());
if (!metricPrefix.isEmpty()) {
HRegion.incrTimeVaryingMetric(metricPrefix + "get_", after - now);
}
return results;
}
@@ -4132,6 +4194,9 @@ public class HRegion implements HeapSize { // , Writable{
public long incrementColumnValue(byte [] row, byte [] family,
byte [] qualifier, long amount, boolean writeToWAL)
throws IOException {
// to be used for metrics
long before = EnvironmentEdgeManager.currentTimeMillis();
checkRow(row, "increment");
boolean flush = false;
boolean wrongLength = false;
@@ -4203,6 +4268,12 @@ public class HRegion implements HeapSize { // , Writable{
closeRegionOperation();
}
// do after lock
long after = EnvironmentEdgeManager.currentTimeMillis();
String metricPrefix = SchemaMetrics.generateSchemaMetricsPrefix(
getTableDesc().getName(), family);
HRegion.incrTimeVaryingMetric(metricPrefix + "increment_", after - before);
if (flush) {
// Request a cache flush. Do it outside update lock.
requestFlush();

File: org/apache/hadoop/hbase/regionserver/StoreScanner.java

@@ -20,6 +20,11 @@
package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.NavigableSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -27,12 +32,8 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.NavigableSet;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.Bytes;
/**
* Scanner scans both the memstore and the HStore. Coalesce KeyValue stream
@@ -46,6 +47,8 @@ class StoreScanner extends NonLazyKeyValueScanner
private KeyValueHeap heap;
private boolean cacheBlocks;
private String metricNameGetSize;
// Used to indicate that the scanner has closed (see HBASE-1107)
// Doesn't need to be volatile because it's always accessed via synchronized methods
private boolean closing = false;
@@ -90,6 +93,7 @@ class StoreScanner extends NonLazyKeyValueScanner
StoreScanner(Store store, Scan scan, final NavigableSet<byte[]> columns)
throws IOException {
this(store, scan.getCacheBlocks(), scan, columns);
initializeMetricNames();
if (columns != null && scan.isRaw()) {
throw new DoNotRetryIOException(
"Cannot specify any column for a raw scan");
@@ -134,6 +138,7 @@ class StoreScanner extends NonLazyKeyValueScanner
List<? extends KeyValueScanner> scanners, ScanType scanType,
long smallestReadPoint, long earliestPutTs) throws IOException {
this(store, false, scan, null);
initializeMetricNames();
matcher = new ScanQueryMatcher(scan, store.scanInfo, null, scanType,
smallestReadPoint, earliestPutTs);
@@ -151,6 +156,7 @@ class StoreScanner extends NonLazyKeyValueScanner
StoreScanner.ScanType scanType, final NavigableSet<byte[]> columns,
final List<KeyValueScanner> scanners) throws IOException {
this(null, scan.getCacheBlocks(), scan, columns);
this.initializeMetricNames();
this.matcher = new ScanQueryMatcher(scan, scanInfo, columns, scanType,
Long.MAX_VALUE, HConstants.LATEST_TIMESTAMP);
@@ -161,6 +167,23 @@ class StoreScanner extends NonLazyKeyValueScanner
heap = new KeyValueHeap(scanners, scanInfo.getComparator());
}
/**
* Method used internally to initialize metric names throughout the
* constructors.
*
* To be called after the store variable has been initialized!
*/
private void initializeMetricNames() {
String tableName = SchemaMetrics.UNKNOWN;
String family = SchemaMetrics.UNKNOWN;
if (store != null) {
tableName = store.getTableName();
family = Bytes.toString(store.getFamily().getName());
}
metricNameGetSize = SchemaMetrics.generateSchemaMetricsPrefix(
tableName, family) + "getsize";
}
/*
* @return List of scanners ordered properly.
*/
@@ -207,6 +230,7 @@ class StoreScanner extends NonLazyKeyValueScanner
return scanners;
}
@Override
public synchronized KeyValue peek() {
if (this.heap == null) {
return this.lastTop;
@@ -214,11 +238,13 @@ class StoreScanner extends NonLazyKeyValueScanner
return this.heap.peek();
}
@Override
public KeyValue next() {
// throw runtime exception perhaps?
throw new RuntimeException("Never call StoreScanner.next()");
}
@Override
public synchronized void close() {
if (this.closing) return;
this.closing = true;
@@ -231,6 +257,7 @@ class StoreScanner extends NonLazyKeyValueScanner
this.lastTop = null; // If both are null, we are closed.
}
@Override
public synchronized boolean seek(KeyValue key) throws IOException {
if (this.heap == null) {
@@ -248,6 +275,7 @@ class StoreScanner extends NonLazyKeyValueScanner
* @param limit
* @return true if there are more rows, false if scanner is done
*/
@Override
public synchronized boolean next(List<KeyValue> outResult, int limit) throws IOException {
checkReseek();
@@ -308,6 +336,7 @@ class StoreScanner extends NonLazyKeyValueScanner
this.heap.next();
}
HRegion.incrNumericMetric(metricNameGetSize, kv.getLength());
if (limit > 0 && (results.size() == limit)) {
break LOOP;
}
@@ -370,11 +399,13 @@ class StoreScanner extends NonLazyKeyValueScanner
return false;
}
@Override
public synchronized boolean next(List<KeyValue> outResult) throws IOException {
return next(outResult, -1);
}
// Implementation of ChangedReadersObserver
@Override
public synchronized void updateReaders() throws IOException {
if (this.closing) return;

File: org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java

@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.regionserver.metrics;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -168,6 +169,9 @@ public class SchemaMetrics {
public static final String CF_PREFIX = "cf.";
public static final String BLOCK_TYPE_PREFIX = "bt.";
public static final String CF_UNKNOWN_PREFIX = CF_PREFIX + UNKNOWN + ".";
public static final String CF_BAD_FAMILY_PREFIX = CF_PREFIX + "__badfamily.";
/**
* A special schema metric value that means "all tables aggregated" or
* "all column families aggregated" when used as a table name or a column
@@ -201,6 +205,16 @@ public class SchemaMetrics {
private static final String SHOW_TABLE_NAME_CONF_KEY =
"hbase.metrics.showTableName";
/** We use this when too many column families are involved in a request. */
private static final String MORE_CFS_OMITTED_STR = "__more";
/**
* Maximum length of a metric name prefix. Used when constructing metric
* names from a set of column families participating in a request.
*/
private static final int MAX_METRIC_PREFIX_LENGTH =
256 - MORE_CFS_OMITTED_STR.length();
// Global variables
/** All instances of this class */
private static final ConcurrentHashMap<String, SchemaMetrics>
@@ -230,9 +244,8 @@ public class SchemaMetrics {
private final String[] storeMetricNames = new String[NUM_STORE_METRIC_TYPES];
private SchemaMetrics(final String tableName, final String cfName) {
String metricPrefix =
tableName.equals(TOTAL_KEY) ? "" : TABLE_PREFIX + tableName + ".";
metricPrefix += cfName.equals(TOTAL_KEY) ? "" : CF_PREFIX + cfName + ".";
String metricPrefix = SchemaMetrics.generateSchemaMetricsPrefix(
tableName, cfName);
for (BlockCategory blockCategory : BlockCategory.values()) {
for (boolean isCompaction : BOOL_VALUES) {
@@ -292,25 +305,12 @@ public class SchemaMetrics {
tableName = UNKNOWN;
}
if (!tableName.equals(TOTAL_KEY)) {
// We are provided with a non-trivial table name (including "unknown").
// We need to know whether table name should be included into metrics.
if (useTableNameGlobally == null) {
throw new IllegalStateException("The value of the "
+ SHOW_TABLE_NAME_CONF_KEY + " conf option has not been specified "
+ "in SchemaMetrics");
}
final boolean useTableName = useTableNameGlobally;
if (!useTableName) {
// Don't include table name in metric keys.
tableName = TOTAL_KEY;
}
}
if (cfName == null) {
cfName = UNKNOWN;
}
tableName = getEffectiveTableName(tableName);
final String instanceKey = tableName + "\t" + cfName;
SchemaMetrics schemaMetrics = cfToMetrics.get(instanceKey);
if (schemaMetrics != null) {
@@ -495,6 +495,103 @@ public class SchemaMetrics {
setUseTableName(useTableNameNew);
}
/**
* Determine the table name to be included in metric keys. If the global
* configuration says that we should not use table names in metrics,
* we always return {@link #TOTAL_KEY} even if a nontrivial table name is
* provided.
*
* @param tableName a table name or {@link #TOTAL_KEY} when aggregating
* across all tables
* @return the table name to use in metric keys
*/
private static String getEffectiveTableName(String tableName) {
if (!tableName.equals(TOTAL_KEY)) {
// We are provided with a non-trivial table name (including "unknown").
// We need to know whether table name should be included into metrics.
if (useTableNameGlobally == null) {
throw new IllegalStateException("The value of the "
+ SHOW_TABLE_NAME_CONF_KEY + " conf option has not been specified "
+ "in SchemaMetrics");
}
final boolean useTableName = useTableNameGlobally;
if (!useTableName) {
// Don't include table name in metric keys.
tableName = TOTAL_KEY;
}
}
return tableName;
}
/**
* Method to transform a combination of a table name and a column family name
* into a metric key prefix. Tables/column family names equal to
* {@link #TOTAL_KEY} are omitted from the prefix.
*
* @param tableName the table name or {@link #TOTAL_KEY} for all tables
* @param cfName the column family name or {@link #TOTAL_KEY} for all CFs
* @return the metric name prefix, ending with a dot.
*/
public static String generateSchemaMetricsPrefix(String tableName,
final String cfName) {
tableName = getEffectiveTableName(tableName);
String schemaMetricPrefix =
tableName.equals(TOTAL_KEY) ? "" : TABLE_PREFIX + tableName + ".";
schemaMetricPrefix +=
cfName.equals(TOTAL_KEY) ? "" : CF_PREFIX + cfName + ".";
return schemaMetricPrefix;
}
public static String generateSchemaMetricsPrefix(byte[] tableName,
byte[] cfName) {
return generateSchemaMetricsPrefix(Bytes.toString(tableName),
Bytes.toString(cfName));
}
/**
* Method to transform a table name and a set of column families (in byte[]
* format) into a metric key prefix.
*
* @param tableName the table name or {@link #TOTAL_KEY} for all tables
* @param families the ordered set of column families
* @return the metric name prefix, ending with a dot, or an empty string in
* case of invalid arguments. This is OK since we always expect
* some CFs to be included.
*/
public static String generateSchemaMetricsPrefix(String tableName,
Set<byte[]> families) {
if (families == null || families.isEmpty() ||
tableName == null || tableName.isEmpty()) {
return "";
}
if (families.size() == 1) {
return generateSchemaMetricsPrefix(tableName,
Bytes.toString(families.iterator().next()));
}
tableName = getEffectiveTableName(tableName);
List<byte[]> sortedFamilies = new ArrayList<byte[]>(families);
Collections.sort(sortedFamilies, Bytes.BYTES_COMPARATOR);
StringBuilder sb = new StringBuilder();
int numCFsLeft = families.size();
for (byte[] family : sortedFamilies) {
if (sb.length() > MAX_METRIC_PREFIX_LENGTH) {
sb.append(MORE_CFS_OMITTED_STR);
break;
}
--numCFsLeft;
sb.append(Bytes.toString(family));
if (numCFsLeft > 0) {
sb.append("~");
}
}
return SchemaMetrics.generateSchemaMetricsPrefix(tableName, sb.toString());
}
/**
* Sets the flag of whether to use table name in metric names. This flag
* is specified in configuration and is not expected to change at runtime,

File: org/apache/hadoop/hbase/io/TestHeapSize.java

@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.MemStore;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.junit.experimental.categories.Category;
@@ -318,5 +319,9 @@ public class TestHeapSize extends TestCase {
// accounted for. But we have satisfied our two core requirements.
// Sizing is quite accurate now, and our tests will throw errors if
// any of these classes are modified without updating overhead sizes.
SchemaConfigured sc = new SchemaConfigured(null, "myTable", "myCF");
assertEquals(ClassSize.estimateBase(SchemaConfigured.class, true),
sc.heapSize());
}
}

File: org/apache/hadoop/hbase/regionserver/TestMemStore.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.Store.ScanInfo;
import org.apache.hadoop.hbase.regionserver.StoreScanner.ScanType;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.Bytes;
import com.google.common.base.Joiner;
@@ -62,6 +63,7 @@ public class TestMemStore extends TestCase {
super.setUp();
this.mvcc = new MultiVersionConsistencyControl();
this.memstore = new MemStore();
SchemaMetrics.setUseTableNameInTest(false);
}
public void testPutSameKey() {

File: org/apache/hadoop/hbase/regionserver/TestStoreScanner.java

@@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.Store.ScanInfo;
import org.apache.hadoop.hbase.regionserver.StoreScanner.ScanType;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.experimental.categories.Category;
@@ -44,6 +45,11 @@ public class TestStoreScanner extends TestCase {
KeyValue.COMPARATOR);
private ScanType scanType = ScanType.USER_SCAN;
public void setUp() throws Exception {
super.setUp();
SchemaMetrics.setUseTableNameInTest(false);
}
/*
* Test utility for building a NavigableSet for scanners.
* @param strCols

File: org/apache/hadoop/hbase/regionserver/metrics/TestSchemaConfigured.java

@@ -45,13 +45,6 @@ public class TestSchemaConfigured {
private static final Path TMP_HFILE_PATH = new Path(
"/hbase/myTable/myRegion/" + HRegion.REGION_TEMP_SUBDIR + "/hfilename");
@Test
public void testHeapSize() {
SchemaConfigured sc = new SchemaConfigured(null, TABLE_NAME, CF_NAME);
assertEquals(ClassSize.estimateBase(SchemaConfigured.class, true),
sc.heapSize());
}
/** Test if toString generates real JSON */
@Test
public void testToString() throws JSONException {

File: org/apache/hadoop/hbase/regionserver/metrics/TestSchemaMetrics.java

@@ -19,21 +19,25 @@
package org.apache.hadoop.hbase.regionserver.metrics;
import static org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.
BOOL_VALUES;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import static org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.
BOOL_VALUES;
import static org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.
BlockMetricType;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -41,8 +45,6 @@ import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import static org.junit.Assert.*;
@Category(MediumTests.class)
@RunWith(Parameterized.class)
public class TestSchemaMetrics {
@@ -213,4 +215,33 @@ public class TestSchemaMetrics {
}
}
@Test
public void testGenerateSchemaMetricsPrefix() {
String tableName = "table1";
int numCF = 3;
StringBuilder expected = new StringBuilder();
if (useTableName) {
expected.append("tbl.");
expected.append(tableName);
expected.append(".");
}
expected.append("cf.");
Set<byte[]> families = new HashSet<byte[]>();
for (int i = 1; i <= numCF; i++) {
String cf = "cf" + i;
families.add(Bytes.toBytes(cf));
expected.append(cf);
if (i == numCF) {
expected.append(".");
} else {
expected.append("~");
}
}
String result = SchemaMetrics.generateSchemaMetricsPrefix(tableName,
families);
assertEquals(expected.toString(), result);
}
}