Revert "HBASE-15728 Add Per-Table metrics back"

This reverts commit fb74f215b4.
Andrew Purtell 2018-09-04 13:12:20 -07:00
parent dfb1af4892
commit cf198a65e8
21 changed files with 234 additions and 856 deletions

View File

@@ -248,9 +248,6 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo
String TOTAL_ROW_ACTION_REQUEST_COUNT_DESC =
"Total number of region requests this RegionServer has answered, count by row-level action";
String READ_REQUEST_COUNT = "readRequestCount";
String FILTERED_READ_REQUEST_COUNT = "filteredReadRequestCount";
String FILTERED_READ_REQUEST_COUNT_DESC =
"Number of read requests this region server has answered.";
String READ_REQUEST_COUNT_DESC =
"Number of read requests this region server has answered.";
String WRITE_REQUEST_COUNT = "writeRequestCount";
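These constant names become the metric keys exposed to the metrics system; the per-table variants get a table-scoped prefix. An illustrative reconstruction of that key assembly, pieced together from the MetricsTableSourceImpl and test diffs further down (the table name here is hypothetical):

// Illustrative only: how a per-table metric key is assembled downstream.
String tableNamePrefix = "Namespace_" + "default" + "_table_" + "t1" + "_metric_";
String key = tableNamePrefix + READ_REQUEST_COUNT;
// key -> "Namespace_default_table_t1_metric_readRequestCount"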

View File

@@ -94,8 +94,6 @@ public interface MetricsRegionWrapper {
*/
long getWriteRequestCount();
long getTotalRequestCount();
long getNumFilesCompacted();
long getNumBytesCompacted();

View File

@@ -50,16 +50,18 @@ public interface MetricsTableAggregateSource extends BaseSource {
String NUMBER_OF_TABLES_DESC = "Number of tables in the metrics system";
/**
* Returns MetricsTableSource registered for the table. Creates one if not defined.
* Register a MetricsTableSource as being open.
*
* @param table The table name
* @param source the source for the table being opened.
*/
MetricsTableSource getOrCreateTableSource(String table, MetricsTableWrapperAggregate wrapper);
void register(String table, MetricsTableSource source);
/**
* Remove a table's source. This is called when regions of a table are closed.
*
* @param table The table name
*/
void deleteTableSource(String table);
void deregister(String table);
}
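For orientation, the two lifecycles differ in which side creates the source. A minimal caller-side sketch under that reading (the aggregate and wrapper instances are hypothetical stand-ins, not code from this commit):

// Pre-revert: the aggregate lazily creates and registers the source.
MetricsTableSource source = aggregate.getOrCreateTableSource("t1", wrapper);
// Post-revert: the caller obtains the source from the factory; in this version
// the MetricsTableSourceImpl constructor registers itself with the aggregate.
MetricsTableSource restored = CompatibilitySingletonFactory
    .getInstance(MetricsRegionServerSourceFactory.class).createTable("t1", wrapper);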

View File

@@ -18,14 +18,22 @@
package org.apache.hadoop.hbase.regionserver;
import java.io.Closeable;
/**
* This interface will be implemented to allow the region server to push table metrics into
* MetricsTableAggregateSource, which will in turn push data to the Hadoop metrics system.
*/
public interface MetricsTableSource extends Comparable<MetricsTableSource>, Closeable {
public interface MetricsTableSource extends Comparable<MetricsTableSource> {
String READ_REQUEST_COUNT = "readRequestCount";
String READ_REQUEST_COUNT_DESC = "Number of read requests";
String WRITE_REQUEST_COUNT = "writeRequestCount";
String WRITE_REQUEST_COUNT_DESC = "Number of write requests";
String TOTAL_REQUEST_COUNT = "totalRequestCount";
String TOTAL_REQUEST_COUNT_DESC = "Number of total requests";
String MEMSTORE_SIZE = "memstoreSize";
String MEMSTORE_SIZE_DESC = "The size of memory stores";
String STORE_FILE_SIZE = "storeFileSize";
String STORE_FILE_SIZE_DESC = "The size of store files";
String TABLE_SIZE = "tableSize";
String TABLE_SIZE_DESC = "Total size of the table in the region server";
@@ -34,83 +42,11 @@ public interface MetricsTableSource extends Comparable<MetricsTableSource>, Clos
/**
* Close the table's metrics as all the regions are closing.
*/
@Override
void close();
void registerMetrics();
/**
* Get the aggregate source to which this reports.
*/
MetricsTableAggregateSource getAggregateSource();
/**
* Update the split transaction time histogram
* @param t time it took, in milliseconds
*/
void updateSplitTime(long t);
/**
* Increment the number of requested splits
*/
void incrSplitRequest();
/**
* Increment number of successful splits
*/
void incrSplitSuccess();
/**
* Update the flush time histogram
* @param t time it took, in milliseconds
*/
void updateFlushTime(long t);
/**
* Update the flush memstore size histogram
* @param bytes the number of bytes in the memstore
*/
void updateFlushMemstoreSize(long bytes);
/**
* Update the flush output file size histogram
* @param bytes the number of bytes in the output file
*/
void updateFlushOutputSize(long bytes);
/**
* Update the compaction time histogram, both major and minor
* @param isMajor whether compaction is a major compaction
* @param t time it took, in milliseconds
*/
void updateCompactionTime(boolean isMajor, long t);
/**
* Update the compaction input number of files histogram
* @param isMajor whether compaction is a major compaction
* @param c number of files
*/
void updateCompactionInputFileCount(boolean isMajor, long c);
/**
* Update the compaction total input file size histogram
* @param isMajor whether compaction is a major compaction
* @param bytes the number of bytes of the compaction input file
*/
void updateCompactionInputSize(boolean isMajor, long bytes);
/**
* Update the compaction output number of files histogram
* @param isMajor whether compaction is a major compaction
* @param c number of files
*/
void updateCompactionOutputFileCount(boolean isMajor, long c);
/**
* Update the compaction total output file size
* @param isMajor whether compaction is a major compaction
* @param bytes the number of bytes of the compaction output file
*/
void updateCompactionOutputSize(boolean isMajor, long bytes);
}
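The update* methods deleted here were the per-table fan-out targets that MetricsRegionServer drove on every flush and compaction (see its diff below). A minimal sketch of that call path, with hypothetical values:

// Hypothetical values; mirrors the pre-revert updateFlush() fan-out shown
// later in the MetricsRegionServer diff.
MetricsTableSource source = aggregate.getOrCreateTableSource("t1", wrapper);
source.updateFlushTime(120);                        // flush duration, ms
source.updateFlushMemstoreSize(64L * 1024 * 1024);  // bytes flushed from the memstore
source.updateFlushOutputSize(16L * 1024 * 1024);    // bytes written to the output file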

View File

@@ -28,17 +28,12 @@ public interface MetricsTableWrapperAggregate {
/**
* Get the number of read requests that have been issued against this table
*/
long getReadRequestCount(String table);
/**
* Get the total number of filtered read requests that have been issued against this table
*/
long getFilteredReadRequestCount(String table);
long getReadRequestsCount(String table);
/**
* Get the number of write requests that have been issued against this table
*/
long getWriteRequestCount(String table);
long getWriteRequestsCount(String table);
/**
* Get the total number of requests that have been issued against this table
@@ -46,57 +41,17 @@ public interface MetricsTableWrapperAggregate {
long getTotalRequestsCount(String table);
/**
* Get the memory store size for this table
* Get the memory store size against this table
*/
long getMemstoreSize(String table);
long getMemstoresSize(String table);
/**
* Get the store file size for this table
* Get the store file size against this table
*/
long getStoreFileSize(String table);
long getStoreFilesSize(String table);
/**
* Get the table region size for this table
* Get the table region size against this table
*/
long getTableSize(String table);
/**
* Get the average region size for this table
*/
long getAvgRegionSize(String table);
/**
* Get the number of regions hosted for this table
*/
long getNumRegions(String table);
/**
* Get the number of stores hosted for this table
*/
long getNumStores(String table);
/**
* Get the number of store files hosted for this table
*/
long getNumStoreFiles(String table);
/**
* @return Max age of store files for this table
*/
long getMaxStoreFileAge(String table);
/**
* @return Min age of store files for this table
*/
long getMinStoreFileAge(String table);
/**
* @return Average age of store files for this table
*/
long getAvgStoreFileAge(String table);
/**
* @return Number of reference files for this table
*/
long getNumReferenceFiles(String table);
}

View File

@@ -54,10 +54,9 @@ public class MetricsRegionServerSourceImpl
private final MutableFastCounter slowGet;
private final MutableFastCounter slowIncrement;
private final MutableFastCounter slowAppend;
// split related metrics
private final MutableFastCounter splitRequest;
private final MutableFastCounter splitSuccess;
private final MetricHistogram splitTimeHisto;
// flush related metrics

View File

@@ -22,7 +22,6 @@ import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.hbase.metrics.Interns;
@@ -47,48 +46,24 @@ implements MetricsTableAggregateSource {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
}
private void register(MetricsTableSource source) {
synchronized (this) {
source.registerMetrics();
}
@Override
public void register(String table, MetricsTableSource source) {
tableSources.put(table, source);
}
@Override
public void deleteTableSource(String table) {
public void deregister(String table) {
try {
synchronized (this) {
MetricsTableSource source = tableSources.remove(table);
if (source != null) {
source.close();
}
}
tableSources.remove(table);
} catch (Exception e) {
// Ignored. If this errors out it means that someone is double
// closing the user source and the user metrics is already nulled out.
LOG.info("Error trying to remove " + table + " from " + getClass().getSimpleName(), e);
// closing the region source and the region is already nulled out.
LOG.info(
"Error trying to remove " + table + " from " + this.getClass().getSimpleName(),
e);
}
}
@Override
public MetricsTableSource getOrCreateTableSource(String table,
MetricsTableWrapperAggregate wrapper) {
MetricsTableSource source = tableSources.get(table);
if (source != null) {
return source;
}
source = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)
.createTable(table, wrapper);
MetricsTableSource prev = tableSources.putIfAbsent(table, source);
if (prev != null) {
return prev;
} else {
// register the new metrics now
register(source);
}
return source;
}
/**
* Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all
* expectations of Java programmers. Instead of returning anything, Hadoop metrics expects
* the source to push its metrics into the supplied collector.
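A minimal sketch of the metrics2 push pattern that comment describes, assuming the fields of this class; the record name and the instanceof guard are illustrative, not taken from this commit:

@Override
public void getMetrics(MetricsCollector collector, boolean all) {
  MetricsRecordBuilder mrb = collector.addRecord(metricsName);
  if (tableSources != null) {
    for (MetricsTableSource source : tableSources.values()) {
      if (source instanceof MetricsTableSourceImpl) {
        // each table source appends its counters and gauges to the builder
        ((MetricsTableSourceImpl) source).snapshot(mrb, all);
      }
    }
  }
  metricsRegistry.snapshot(mrb, all);
}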

View File

@@ -25,54 +25,8 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.metrics.Interns;
import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableFastCounter;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.COMPACTED_INPUT_BYTES;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.COMPACTED_INPUT_BYTES_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.COMPACTED_OUTPUT_BYTES;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.COMPACTED_OUTPUT_BYTES_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.COMPACTION_INPUT_FILE_COUNT;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.COMPACTION_INPUT_FILE_COUNT_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.COMPACTION_INPUT_SIZE;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.COMPACTION_INPUT_SIZE_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.COMPACTION_OUTPUT_FILE_COUNT;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.COMPACTION_OUTPUT_FILE_COUNT_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.COMPACTION_OUTPUT_SIZE;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.COMPACTION_OUTPUT_SIZE_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.COMPACTION_TIME;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.COMPACTION_TIME_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.FLUSHED_MEMSTORE_BYTES;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.FLUSHED_MEMSTORE_BYTES_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.FLUSHED_OUTPUT_BYTES;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.FLUSHED_OUTPUT_BYTES_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.FLUSH_MEMSTORE_SIZE;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.FLUSH_MEMSTORE_SIZE_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.FLUSH_OUTPUT_SIZE;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.FLUSH_OUTPUT_SIZE_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.FLUSH_TIME;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.FLUSH_TIME_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.MAJOR_COMPACTED_INPUT_BYTES;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.MAJOR_COMPACTED_INPUT_BYTES_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.MAJOR_COMPACTED_OUTPUT_BYTES;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.MAJOR_COMPACTED_OUTPUT_BYTES_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.MAJOR_COMPACTION_INPUT_FILE_COUNT;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.MAJOR_COMPACTION_INPUT_FILE_COUNT_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.MAJOR_COMPACTION_INPUT_SIZE;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.MAJOR_COMPACTION_INPUT_SIZE_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.MAJOR_COMPACTION_OUTPUT_FILE_COUNT;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.MAJOR_COMPACTION_OUTPUT_FILE_COUNT_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.MAJOR_COMPACTION_OUTPUT_SIZE;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.MAJOR_COMPACTION_OUTPUT_SIZE_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.MAJOR_COMPACTION_TIME;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.MAJOR_COMPACTION_TIME_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.SPLIT_KEY;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.SPLIT_REQUEST_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.SPLIT_REQUEST_KEY;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.SPLIT_SUCCESS_DESC;
import static org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource.SPLIT_SUCCESS_KEY;
@InterfaceAudience.Private
public class MetricsTableSourceImpl implements MetricsTableSource {
@@ -92,41 +46,12 @@ public class MetricsTableSourceImpl implements MetricsTableSource {
private final TableName tableName;
private final int hashCode;
// split related metrics
private MutableFastCounter splitRequest;
private MutableFastCounter splitSuccess;
private MetricHistogram splitTimeHisto;
// flush related metrics
private MetricHistogram flushTimeHisto;
private MetricHistogram flushMemstoreSizeHisto;
private MetricHistogram flushOutputSizeHisto;
private MutableFastCounter flushedMemstoreBytes;
private MutableFastCounter flushedOutputBytes;
// compaction related metrics
private MetricHistogram compactionTimeHisto;
private MetricHistogram compactionInputFileCountHisto;
private MetricHistogram compactionInputSizeHisto;
private MetricHistogram compactionOutputFileCountHisto;
private MetricHistogram compactionOutputSizeHisto;
private MutableFastCounter compactedInputBytes;
private MutableFastCounter compactedOutputBytes;
private MetricHistogram majorCompactionTimeHisto;
private MetricHistogram majorCompactionInputFileCountHisto;
private MetricHistogram majorCompactionInputSizeHisto;
private MetricHistogram majorCompactionOutputFileCountHisto;
private MetricHistogram majorCompactionOutputSizeHisto;
private MutableFastCounter majorCompactedInputBytes;
private MutableFastCounter majorCompactedOutputBytes;
public MetricsTableSourceImpl(String tblName,
MetricsTableAggregateSourceImpl aggregate, MetricsTableWrapperAggregate tblWrapperAgg) {
LOG.debug("Creating new MetricsTableSourceImpl for table ");
this.tableName = TableName.valueOf(tblName);
this.agg = aggregate;
agg.register(tblName, this);
this.tableWrapperAgg = tblWrapperAgg;
this.registry = agg.getMetricsRegistry();
this.tableNamePrefix = "Namespace_" + this.tableName.getNamespaceAsString() +
@@ -134,79 +59,6 @@ public class MetricsTableSourceImpl implements MetricsTableSource {
this.hashCode = this.tableName.hashCode();
}
@Override
public synchronized void registerMetrics() {
flushTimeHisto = registry.newTimeHistogram(tableNamePrefix + FLUSH_TIME, FLUSH_TIME_DESC);
flushMemstoreSizeHisto =
registry.newSizeHistogram(tableNamePrefix + FLUSH_MEMSTORE_SIZE, FLUSH_MEMSTORE_SIZE_DESC);
flushOutputSizeHisto =
registry.newSizeHistogram(tableNamePrefix + FLUSH_OUTPUT_SIZE, FLUSH_OUTPUT_SIZE_DESC);
flushedOutputBytes =
registry.newCounter(tableNamePrefix + FLUSHED_OUTPUT_BYTES, FLUSHED_OUTPUT_BYTES_DESC, 0L);
flushedMemstoreBytes = registry.newCounter(tableNamePrefix + FLUSHED_MEMSTORE_BYTES,
FLUSHED_MEMSTORE_BYTES_DESC, 0L);
compactionTimeHisto =
registry.newTimeHistogram(tableNamePrefix + COMPACTION_TIME, COMPACTION_TIME_DESC);
compactionInputFileCountHisto = registry.newHistogram(
tableNamePrefix + COMPACTION_INPUT_FILE_COUNT, COMPACTION_INPUT_FILE_COUNT_DESC);
compactionInputSizeHisto = registry.newSizeHistogram(tableNamePrefix + COMPACTION_INPUT_SIZE,
COMPACTION_INPUT_SIZE_DESC);
compactionOutputFileCountHisto = registry.newHistogram(
tableNamePrefix + COMPACTION_OUTPUT_FILE_COUNT, COMPACTION_OUTPUT_FILE_COUNT_DESC);
compactionOutputSizeHisto = registry.newSizeHistogram(tableNamePrefix + COMPACTION_OUTPUT_SIZE,
COMPACTION_OUTPUT_SIZE_DESC);
compactedInputBytes = registry.newCounter(tableNamePrefix + COMPACTED_INPUT_BYTES,
COMPACTED_INPUT_BYTES_DESC, 0L);
compactedOutputBytes = registry.newCounter(tableNamePrefix + COMPACTED_OUTPUT_BYTES,
COMPACTED_OUTPUT_BYTES_DESC, 0L);
majorCompactionTimeHisto = registry.newTimeHistogram(tableNamePrefix + MAJOR_COMPACTION_TIME,
MAJOR_COMPACTION_TIME_DESC);
majorCompactionInputFileCountHisto = registry.newHistogram(
tableNamePrefix + MAJOR_COMPACTION_INPUT_FILE_COUNT, MAJOR_COMPACTION_INPUT_FILE_COUNT_DESC);
majorCompactionInputSizeHisto = registry.newSizeHistogram(
tableNamePrefix + MAJOR_COMPACTION_INPUT_SIZE, MAJOR_COMPACTION_INPUT_SIZE_DESC);
majorCompactionOutputFileCountHisto =
registry.newHistogram(tableNamePrefix + MAJOR_COMPACTION_OUTPUT_FILE_COUNT,
MAJOR_COMPACTION_OUTPUT_FILE_COUNT_DESC);
majorCompactionOutputSizeHisto = registry.newSizeHistogram(
tableNamePrefix + MAJOR_COMPACTION_OUTPUT_SIZE, MAJOR_COMPACTION_OUTPUT_SIZE_DESC);
majorCompactedInputBytes = registry.newCounter(tableNamePrefix + MAJOR_COMPACTED_INPUT_BYTES,
MAJOR_COMPACTED_INPUT_BYTES_DESC, 0L);
majorCompactedOutputBytes = registry.newCounter(tableNamePrefix + MAJOR_COMPACTED_OUTPUT_BYTES,
MAJOR_COMPACTED_OUTPUT_BYTES_DESC, 0L);
splitTimeHisto = registry.newTimeHistogram(tableNamePrefix + SPLIT_KEY);
splitRequest = registry.newCounter(tableNamePrefix + SPLIT_REQUEST_KEY, SPLIT_REQUEST_DESC, 0L);
splitSuccess = registry.newCounter(tableNamePrefix + SPLIT_SUCCESS_KEY, SPLIT_SUCCESS_DESC, 0L);
}
private void deregisterMetrics() {
registry.removeHistogramMetrics(tableNamePrefix + FLUSH_TIME);
registry.removeHistogramMetrics(tableNamePrefix + FLUSH_MEMSTORE_SIZE);
registry.removeHistogramMetrics(tableNamePrefix + FLUSH_OUTPUT_SIZE);
registry.removeMetric(tableNamePrefix + FLUSHED_OUTPUT_BYTES);
registry.removeMetric(tableNamePrefix + FLUSHED_MEMSTORE_BYTES);
registry.removeHistogramMetrics(tableNamePrefix + COMPACTION_TIME);
registry.removeHistogramMetrics(tableNamePrefix + COMPACTION_INPUT_FILE_COUNT);
registry.removeHistogramMetrics(tableNamePrefix + COMPACTION_INPUT_SIZE);
registry.removeHistogramMetrics(tableNamePrefix + COMPACTION_OUTPUT_FILE_COUNT);
registry.removeHistogramMetrics(tableNamePrefix + COMPACTION_OUTPUT_SIZE);
registry.removeMetric(tableNamePrefix + COMPACTED_INPUT_BYTES);
registry.removeMetric(tableNamePrefix + COMPACTED_OUTPUT_BYTES);
registry.removeHistogramMetrics(tableNamePrefix + MAJOR_COMPACTION_TIME);
registry.removeHistogramMetrics(tableNamePrefix + MAJOR_COMPACTION_INPUT_FILE_COUNT);
registry.removeHistogramMetrics(tableNamePrefix + MAJOR_COMPACTION_INPUT_SIZE);
registry.removeHistogramMetrics(tableNamePrefix + MAJOR_COMPACTION_OUTPUT_FILE_COUNT);
registry.removeHistogramMetrics(tableNamePrefix + MAJOR_COMPACTION_OUTPUT_SIZE);
registry.removeMetric(tableNamePrefix + MAJOR_COMPACTED_INPUT_BYTES);
registry.removeMetric(tableNamePrefix + MAJOR_COMPACTED_OUTPUT_BYTES);
registry.removeHistogramMetrics(tableNamePrefix + SPLIT_KEY);
registry.removeMetric(tableNamePrefix + SPLIT_REQUEST_KEY);
registry.removeMetric(tableNamePrefix + SPLIT_SUCCESS_KEY);
}
@Override
public void close() {
boolean wasClosed = closed.getAndSet(true);
@@ -218,7 +70,7 @@ public class MetricsTableSourceImpl implements MetricsTableSource {
// Before removing the metrics remove this table from the aggregate table bean.
// This should mean that it's unlikely that snapshot and close happen at the same time.
agg.deleteTableSource(tableName.getNameAsString());
agg.deregister(tableName.getNameAsString());
// While it's unlikely that snapshot and close happen at the same time, it's still possible.
// So grab the lock to ensure that all calls to snapshot are done before we remove the metrics
@@ -226,7 +78,6 @@ public class MetricsTableSourceImpl implements MetricsTableSource {
if (LOG.isTraceEnabled()) {
LOG.trace("Removing table Metrics for table ");
}
deregisterMetrics();
tableWrapperAgg = null;
}
}
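The ordering above (deregister from the aggregate first, then synchronize before nulling the wrapper) is the whole close/snapshot guard. A minimal sketch of the idea, with illustrative field names (AtomicBoolean is java.util.concurrent.atomic.AtomicBoolean):

// Sketch of the close/snapshot race guard described in the comments above.
private final AtomicBoolean closed = new AtomicBoolean(false);

public void close() {
  if (closed.getAndSet(true)) {
    return;                                      // another region already closed us
  }
  agg.deregister(tableName.getNameAsString());   // stop new snapshots first
  synchronized (this) {
    tableWrapperAgg = null;                      // in-flight snapshots have drained
  }
}

public void snapshot(MetricsRecordBuilder mrb, boolean all) {
  synchronized (this) {
    if (closed.get()) {
      return;                                    // metrics already torn down
    }
    // ... append counters and gauges to mrb ...
  }
}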
@@ -271,52 +122,24 @@ public class MetricsTableSourceImpl implements MetricsTableSource {
}
if (this.tableWrapperAgg != null) {
mrb.addCounter(Interns.info(tableNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT,
MetricsRegionServerSource.READ_REQUEST_COUNT_DESC),
tableWrapperAgg.getReadRequestCount(tableName.getNameAsString()));
mrb.addCounter(
Interns.info(tableNamePrefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT,
MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT_DESC),
tableWrapperAgg.getFilteredReadRequestCount(tableName.getNameAsString()));
mrb.addCounter(Interns.info(tableNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT,
MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC),
tableWrapperAgg.getWriteRequestCount(tableName.getNameAsString()));
mrb.addCounter(Interns.info(tableNamePrefix + MetricsRegionServerSource.TOTAL_REQUEST_COUNT,
MetricsRegionServerSource.TOTAL_REQUEST_COUNT_DESC),
mrb.addCounter(Interns.info(tableNamePrefix + MetricsTableSource.READ_REQUEST_COUNT,
MetricsTableSource.READ_REQUEST_COUNT_DESC),
tableWrapperAgg.getReadRequestsCount(tableName.getNameAsString()));
mrb.addCounter(Interns.info(tableNamePrefix + MetricsTableSource.WRITE_REQUEST_COUNT,
MetricsTableSource.WRITE_REQUEST_COUNT_DESC),
tableWrapperAgg.getWriteRequestsCount(tableName.getNameAsString()));
mrb.addCounter(Interns.info(tableNamePrefix + MetricsTableSource.TOTAL_REQUEST_COUNT,
MetricsTableSource.TOTAL_REQUEST_COUNT_DESC),
tableWrapperAgg.getTotalRequestsCount(tableName.getNameAsString()));
mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE,
MetricsRegionServerSource.MEMSTORE_SIZE_DESC),
tableWrapperAgg.getMemstoreSize(tableName.getNameAsString()));
mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT,
MetricsRegionServerSource.STOREFILE_COUNT_DESC),
tableWrapperAgg.getNumStoreFiles(tableName.getNameAsString()));
mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE,
MetricsRegionServerSource.STOREFILE_SIZE_DESC),
tableWrapperAgg.getStoreFileSize(tableName.getNameAsString()));
mrb.addGauge(Interns.info(tableNamePrefix + MetricsTableSource.MEMSTORE_SIZE,
MetricsTableSource.MEMSTORE_SIZE_DESC),
tableWrapperAgg.getMemstoresSize(tableName.getNameAsString()));
mrb.addGauge(Interns.info(tableNamePrefix + MetricsTableSource.STORE_FILE_SIZE,
MetricsTableSource.STORE_FILE_SIZE_DESC),
tableWrapperAgg.getStoreFilesSize(tableName.getNameAsString()));
mrb.addGauge(Interns.info(tableNamePrefix + MetricsTableSource.TABLE_SIZE,
MetricsTableSource.TABLE_SIZE_DESC),
tableWrapperAgg.getTableSize(tableName.getNameAsString()));
mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.AVERAGE_REGION_SIZE,
MetricsRegionServerSource.AVERAGE_REGION_SIZE_DESC),
tableWrapperAgg.getAvgRegionSize(tableName.getNameAsString()));
mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.REGION_COUNT,
MetricsRegionServerSource.REGION_COUNT_DESC),
tableWrapperAgg.getNumRegions(tableName.getNameAsString()));
mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.STORE_COUNT,
MetricsRegionServerSource.STORE_COUNT_DESC),
tableWrapperAgg.getNumStores(tableName.getNameAsString()));
mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.MAX_STORE_FILE_AGE,
MetricsRegionServerSource.MAX_STORE_FILE_AGE_DESC),
tableWrapperAgg.getMaxStoreFileAge(tableName.getNameAsString()));
mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.MIN_STORE_FILE_AGE,
MetricsRegionServerSource.MIN_STORE_FILE_AGE_DESC),
tableWrapperAgg.getMinStoreFileAge(tableName.getNameAsString()));
mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.AVG_STORE_FILE_AGE,
MetricsRegionServerSource.AVG_STORE_FILE_AGE_DESC),
tableWrapperAgg.getAvgStoreFileAge(tableName.getNameAsString()));
mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.NUM_REFERENCE_FILES,
MetricsRegionServerSource.NUM_REFERENCE_FILES_DESC),
tableWrapperAgg.getNumReferenceFiles(tableName.getNameAsString()));
}
}
}
@@ -345,80 +168,4 @@ public class MetricsTableSourceImpl implements MetricsTableSource {
public String getTableNamePrefix() {
return tableNamePrefix;
}
@Override
public void incrSplitRequest() {
splitRequest.incr();
}
@Override
public void incrSplitSuccess() {
splitSuccess.incr();
}
@Override
public void updateSplitTime(long t) {
splitTimeHisto.add(t);
}
@Override
public void updateFlushTime(long t) {
flushTimeHisto.add(t);
}
@Override
public void updateFlushMemstoreSize(long bytes) {
flushMemstoreSizeHisto.add(bytes);
flushedMemstoreBytes.incr(bytes);
}
@Override
public void updateFlushOutputSize(long bytes) {
flushOutputSizeHisto.add(bytes);
flushedOutputBytes.incr(bytes);
}
@Override
public void updateCompactionTime(boolean isMajor, long t) {
compactionTimeHisto.add(t);
if (isMajor) {
majorCompactionTimeHisto.add(t);
}
}
@Override
public void updateCompactionInputFileCount(boolean isMajor, long c) {
compactionInputFileCountHisto.add(c);
if (isMajor) {
majorCompactionInputFileCountHisto.add(c);
}
}
@Override
public void updateCompactionInputSize(boolean isMajor, long bytes) {
compactionInputSizeHisto.add(bytes);
compactedInputBytes.incr(bytes);
if (isMajor) {
majorCompactionInputSizeHisto.add(bytes);
majorCompactedInputBytes.incr(bytes);
}
}
@Override
public void updateCompactionOutputFileCount(boolean isMajor, long c) {
compactionOutputFileCountHisto.add(c);
if (isMajor) {
majorCompactionOutputFileCountHisto.add(c);
}
}
@Override
public void updateCompactionOutputSize(boolean isMajor, long bytes) {
compactionOutputSizeHisto.add(bytes);
compactedOutputBytes.incr(bytes);
if (isMajor) {
majorCompactionOutputSizeHisto.add(bytes);
majorCompactedOutputBytes.incr(bytes);
}
}
}

View File

@@ -191,10 +191,5 @@ public class TestMetricsRegionSourceImpl {
public long getMaxFlushQueueSize() {
return 0;
}
@Override
public long getTotalRequestCount() {
return 0;
}
}
}

View File

@@ -40,12 +40,9 @@ public class TestMetricsTableSourceImpl {
MetricsRegionServerSourceFactory metricsFact =
CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
MetricsTableSource one = metricsFact.createTable(
"ONETABLE", new MetricsTableWrapperStub("ONETABLE"));
MetricsTableSource oneClone = metricsFact.createTable(
"ONETABLE", new MetricsTableWrapperStub("ONETABLE"));
MetricsTableSource two = metricsFact.createTable(
"TWOTABLE", new MetricsTableWrapperStub("TWOTABLE"));
MetricsTableSource one = metricsFact.createTable("ONETABLE", new TableWrapperStub("ONETABLE"));
MetricsTableSource oneClone = metricsFact.createTable("ONETABLE", new TableWrapperStub("ONETABLE"));
MetricsTableSource two = metricsFact.createTable("TWOTABLE", new TableWrapperStub("TWOTABLE"));
assertEquals(0, one.compareTo(oneClone));
assertEquals(one.hashCode(), oneClone.hashCode());
@@ -68,7 +65,50 @@ public class TestMetricsTableSourceImpl {
public void testGetTableMetrics() throws Exception{
MetricsTableSource oneTbl =
CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)
.createTable("ONETABLE", new MetricsTableWrapperStub("ONETABLE"));
.createTable("ONETABLE", new TableWrapperStub("ONETABLE"));
assertEquals("ONETABLE", oneTbl.getTableName());
}
static class TableWrapperStub implements MetricsTableWrapperAggregate {
private String tableName;
public TableWrapperStub(String tableName) {
this.tableName = tableName;
}
@Override
public long getReadRequestsCount(String table) {
return 10;
}
@Override
public long getWriteRequestsCount(String table) {
return 20;
}
@Override
public long getTotalRequestsCount(String table) {
return 30;
}
@Override
public long getMemstoresSize(String table) {
return 1000;
}
@Override
public long getStoreFilesSize(String table) {
return 2000;
}
@Override
public long getTableSize(String table) {
return 3000;
}
public String getTableName() {
return tableName;
}
}
}

View File

@@ -2703,9 +2703,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
status.setStatus(msg);
if (rsServices != null && rsServices.getMetrics() != null) {
rsServices.getMetrics().updateFlush(
getTableDesc().getTableName().getNameAsString(),
time - startTime,
rsServices.getMetrics().updateFlush(time - startTime,
totalFlushableSizeOfFlushableStores, flushedOutputFileSize);
}

View File

@@ -1450,7 +1450,7 @@ public class HRegionServer extends HasThread implements
this.walFactory = setupWALAndReplication();
// Init in here rather than in constructor after thread name has been set
this.metricsRegionServer = new MetricsRegionServer(
new MetricsRegionServerWrapperImpl(this), conf, metricsTable);
new MetricsRegionServerWrapperImpl(this), conf);
this.metricsTable = new MetricsTable(new MetricsTableWrapperAggregateImpl(this));
// Now that we have a metrics source, start the pause monitor
this.pauseMonitor = new JvmPauseMonitor(conf, getMetrics().getMetricsSource());

View File

@@ -1380,10 +1380,9 @@ public class HStore implements Store {
long now = EnvironmentEdgeManager.currentTime();
if (region.getRegionServerServices() != null
&& region.getRegionServerServices().getMetrics() != null) {
region.getRegionServerServices().getMetrics().updateCompaction(
region.getTableDesc().getTableName().getNameAsString(),
cr.isMajor(), now - compactionStartTime, cr.getFiles().size(),
newFiles.size(), cr.getSize(), outputBytes);
region.getRegionServerServices().getMetrics().updateCompaction(cr.isMajor(),
now - compactionStartTime, cr.getFiles().size(), newFiles.size(), cr.getSize(),
outputBytes);
}
logCompactionEndMessage(cr, sfs, now, compactionStartTime);
@@ -2253,7 +2252,7 @@ public class HStore implements Store {
latestTS = (createdTS > latestTS) ? createdTS : latestTS;
}
long now = EnvironmentEdgeManager.currentTime();
return latestTS == 0 ? 0 : now - latestTS;
return now - latestTS;
}
@Override
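The guard removed in the second HStore hunk matters when a store has no files: latestTS stays 0, so now - latestTS reports roughly the current epoch time as an "age" instead of 0. An illustrative snippet:

// Illustrative only: why the removed "latestTS == 0" guard existed.
long latestTS = 0;                                      // no store file timestamps seen
long now = System.currentTimeMillis();                  // stand-in for EnvironmentEdgeManager.currentTime()
long withGuard = (latestTS == 0) ? 0 : now - latestTS;  // 0, a sane "no files" answer
long withoutGuard = now - latestTS;                     // ~epoch millis, a bogus age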

View File

@@ -46,18 +46,15 @@ public class MetricsRegionServer {
private MetricsRegionServerSource serverSource;
private MetricsRegionServerWrapper regionServerWrapper;
private RegionServerTableMetrics tableMetrics;
private final MetricsTable metricsTable;
private MetricRegistry metricRegistry;
private Timer bulkLoadTimer;
public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, Configuration conf,
MetricsTable metricsTable) {
public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, Configuration conf) {
this(regionServerWrapper,
CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)
.createServer(regionServerWrapper),
createTableMetrics(conf),
metricsTable);
createTableMetrics(conf));
// Create hbase-metrics module based metrics. The registry should already be registered by the
// MetricsRegionServerSource
@@ -69,12 +66,10 @@ public class MetricsRegionServer {
MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper,
MetricsRegionServerSource serverSource,
RegionServerTableMetrics tableMetrics,
MetricsTable metricsTable) {
RegionServerTableMetrics tableMetrics) {
this.regionServerWrapper = regionServerWrapper;
this.serverSource = serverSource;
this.tableMetrics = tableMetrics;
this.metricsTable = metricsTable;
}
/**
@@ -198,33 +193,19 @@ public class MetricsRegionServer {
serverSource.incrSplitSuccess();
}
public void updateFlush(String table, long t, long memstoreSize, long fileSize) {
public void updateFlush(long t, long memstoreSize, long fileSize) {
serverSource.updateFlushTime(t);
serverSource.updateFlushMemstoreSize(memstoreSize);
serverSource.updateFlushOutputSize(fileSize);
if (table != null) {
metricsTable.updateFlushTime(table, t);
metricsTable.updateFlushMemstoreSize(table, memstoreSize);
metricsTable.updateFlushOutputSize(table, fileSize);
}
}
public void updateCompaction(String table, boolean isMajor, long t, int inputFileCount,
int outputFileCount, long inputBytes, long outputBytes) {
public void updateCompaction(boolean isMajor, long t, int inputFileCount, int outputFileCount,
long inputBytes, long outputBytes) {
serverSource.updateCompactionTime(isMajor, t);
serverSource.updateCompactionInputFileCount(isMajor, inputFileCount);
serverSource.updateCompactionOutputFileCount(isMajor, outputFileCount);
serverSource.updateCompactionInputSize(isMajor, inputBytes);
serverSource.updateCompactionOutputSize(isMajor, outputBytes);
if (table != null) {
metricsTable.updateCompactionTime(table, isMajor, t);
metricsTable.updateCompactionInputFileCount(table, isMajor, inputFileCount);
metricsTable.updateCompactionOutputFileCount(table, isMajor, outputFileCount);
metricsTable.updateCompactionInputSize(table, isMajor, inputBytes);
metricsTable.updateCompactionOutputSize(table, isMajor, outputBytes);
}
}
public void updateBulkLoad(long millis) {

View File

@@ -154,11 +154,6 @@ public class MetricsRegionWrapperImpl implements MetricsRegionWrapper, Closeable
return now - lastMajorCompactionTs;
}
@Override
public long getTotalRequestCount() {
return getReadRequestCount() + getWriteRequestCount();
}
@Override
public long getNumCompactionsFailed() {
return this.region.compactionsFailed.get();

View File

@@ -24,67 +24,19 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
@InterfaceAudience.Private
public class MetricsTable {
private final MetricsTableAggregateSource tableSourceAgg;
private MetricsTableWrapperAggregate wrapper;
private MetricsTableWrapperAggregate tableWrapperAgg;
public MetricsTable(final MetricsTableWrapperAggregate wrapper) {
tableSourceAgg = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)
.getTableAggregate();
this.wrapper = wrapper;
this.tableWrapperAgg = wrapper;
}
public MetricsTableWrapperAggregate getTableWrapperAgg() {
return wrapper;
return tableWrapperAgg;
}
public MetricsTableAggregateSource getTableSourceAgg() {
return tableSourceAgg;
}
public void incrSplitRequest(String table) {
tableSourceAgg.getOrCreateTableSource(table, wrapper).incrSplitRequest();
}
public void incrSplitSuccess(String table) {
tableSourceAgg.getOrCreateTableSource(table, wrapper).incrSplitSuccess();
}
public void updateSplitTime(String table, long t) {
tableSourceAgg.getOrCreateTableSource(table, wrapper).updateSplitTime(t);
}
public void updateFlushTime(String table, long t) {
tableSourceAgg.getOrCreateTableSource(table, wrapper).updateFlushTime(t);
}
public void updateFlushMemstoreSize(String table, long bytes) {
tableSourceAgg.getOrCreateTableSource(table, wrapper).updateFlushMemstoreSize(bytes);
}
public void updateFlushOutputSize(String table, long bytes) {
tableSourceAgg.getOrCreateTableSource(table, wrapper).updateFlushOutputSize(bytes);
}
public void updateCompactionTime(String table, boolean isMajor, long t) {
tableSourceAgg.getOrCreateTableSource(table, wrapper).updateCompactionTime(isMajor, t);
}
public void updateCompactionInputFileCount(String table, boolean isMajor, long c) {
tableSourceAgg.getOrCreateTableSource(table, wrapper)
.updateCompactionInputFileCount(isMajor, c);
}
public void updateCompactionInputSize(String table, boolean isMajor, long bytes) {
tableSourceAgg.getOrCreateTableSource(table, wrapper)
.updateCompactionInputSize(isMajor, bytes);
}
public void updateCompactionOutputFileCount(String table, boolean isMajor, long c) {
tableSourceAgg.getOrCreateTableSource(table, wrapper)
.updateCompactionOutputFileCount(isMajor, c);
}
public void updateCompactionOutputSize(String table, boolean isMajor, long bytes) {
tableSourceAgg.getOrCreateTableSource(table, wrapper)
.updateCompactionOutputSize(isMajor, bytes);
}
}

View File

@@ -43,8 +43,7 @@ public class MetricsTableWrapperAggregateImpl implements MetricsTableWrapperAggr
private Runnable runnable;
private long period;
private ScheduledFuture<?> tableMetricsUpdateTask;
private ConcurrentHashMap<TableName, MetricsTableValues> metricsTableMap
= new ConcurrentHashMap<>();
private ConcurrentHashMap<TableName, MetricsTableValues> metricsTableMap = new ConcurrentHashMap<>();
public MetricsTableWrapperAggregateImpl(final HRegionServer regionServer) {
this.regionServer = regionServer;
@@ -52,8 +51,8 @@ public class MetricsTableWrapperAggregateImpl implements MetricsTableWrapperAggr
HConstants.DEFAULT_REGIONSERVER_METRICS_PERIOD) + 1000;
this.executor = CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor();
this.runnable = new TableMetricsWrapperRunnable();
this.tableMetricsUpdateTask = this.executor.scheduleWithFixedDelay(this.runnable, period,
this.period, TimeUnit.MILLISECONDS);
this.tableMetricsUpdateTask = this.executor.scheduleWithFixedDelay(this.runnable, period, this.period,
TimeUnit.MILLISECONDS);
}
public class TableMetricsWrapperRunnable implements Runnable {
@@ -63,43 +62,33 @@ public class MetricsTableWrapperAggregateImpl implements MetricsTableWrapperAggr
Map<TableName, MetricsTableValues> localMetricsTableMap = new HashMap<>();
for (Region r : regionServer.getOnlineRegionsLocalContext()) {
TableName tbl = r.getTableDesc().getTableName();
MetricsTableValues mt = localMetricsTableMap.get(tbl);
if (mt == null) {
mt = new MetricsTableValues();
localMetricsTableMap.put(tbl, mt);
TableName tbl= r.getTableDesc().getTableName();
MetricsTableValues metricsTable = localMetricsTableMap.get(tbl);
if (metricsTable == null) {
metricsTable = new MetricsTableValues();
localMetricsTableMap.put(tbl, metricsTable);
}
if (r.getStores() != null) {
for (Store store : r.getStores()) {
mt.storeFileCount += store.getStorefilesCount();
mt.memstoreSize += store.getMemStoreSize();
mt.storeFileSize += store.getStorefilesSize();
mt.referenceFileCount += store.getNumReferenceFiles();
mt.maxStoreFileAge = Math.max(mt.maxStoreFileAge, store.getMaxStoreFileAge());
mt.minStoreFileAge = Math.min(mt.minStoreFileAge, store.getMinStoreFileAge());
mt.totalStoreFileAge = store.getAvgStoreFileAge() * store.getStorefilesCount();
mt.storeCount += 1;
}
long tempStorefilesSize = 0;
for (Store store : r.getStores()) {
tempStorefilesSize += store.getStorefilesSize();
}
mt.regionCount += 1;
mt.readRequestCount += r.getReadRequestsCount();
mt.filteredReadRequestCount += getFilteredReadRequestCount(tbl.getNameAsString());
mt.writeRequestCount += r.getWriteRequestsCount();
metricsTable.setMemstoresSize(metricsTable.getMemstoresSize() + r.getMemstoreSize());
metricsTable.setStoreFilesSize(metricsTable.getStoreFilesSize() + tempStorefilesSize);
metricsTable.setTableSize(metricsTable.getMemstoresSize() + metricsTable.getStoreFilesSize());
metricsTable.setReadRequestsCount(metricsTable.getReadRequestsCount() + r.getReadRequestsCount());
metricsTable.setWriteRequestsCount(metricsTable.getWriteRequestsCount() + r.getWriteRequestsCount());
metricsTable.setTotalRequestsCount(metricsTable.getReadRequestsCount() + metricsTable.getWriteRequestsCount());
}
for (Map.Entry<TableName, MetricsTableValues> entry : localMetricsTableMap.entrySet()) {
for(Map.Entry<TableName, MetricsTableValues> entry : localMetricsTableMap.entrySet()) {
TableName tbl = entry.getKey();
if (metricsTableMap.get(tbl) == null) {
// this will add the Wrapper to the list of TableMetrics
MetricsTableSource tableSource = CompatibilitySingletonFactory
.getInstance(MetricsRegionServerSourceFactory.class).createTable(tbl.getNameAsString(),
MetricsTableWrapperAggregateImpl.this);
CompatibilitySingletonFactory
.getInstance(MetricsRegionServerSourceFactory.class)
.getTableAggregate()
.getOrCreateTableSource(tbl.getNameAsString(), MetricsTableWrapperAggregateImpl.this);
.getInstance(MetricsRegionServerSourceFactory.class).getTableAggregate()
.register(tbl.getNameAsString(), tableSource);
}
metricsTableMap.put(entry.getKey(), entry.getValue());
}
@@ -108,7 +97,7 @@ public class MetricsTableWrapperAggregateImpl implements MetricsTableWrapperAggr
MetricsTableAggregateSource agg = CompatibilitySingletonFactory
.getInstance(MetricsRegionServerSourceFactory.class).getTableAggregate();
for (TableName table : existingTableNames) {
agg.deleteTableSource(table.getNameAsString());
agg.deregister(table.getNameAsString());
if (metricsTableMap.get(table) != null) {
metricsTableMap.remove(table);
}
@@ -117,145 +106,57 @@ public class MetricsTableWrapperAggregateImpl implements MetricsTableWrapperAggr
}
@Override
public long getReadRequestCount(String table) {
public long getReadRequestsCount(String table) {
MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
if (metricsTable == null)
return 0;
else
return metricsTable.readRequestCount;
return metricsTable.getReadRequestsCount();
}
@Override
public long getFilteredReadRequestCount(String table) {
public long getWriteRequestsCount(String table) {
MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
if (metricsTable == null) {
if (metricsTable == null)
return 0;
}
return metricsTable.filteredReadRequestCount;
}
@Override
public long getWriteRequestCount(String table) {
MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
if (metricsTable == null) {
return 0;
}
return metricsTable.writeRequestCount;
else
return metricsTable.getWriteRequestsCount();
}
@Override
public long getTotalRequestsCount(String table) {
MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
if (metricsTable == null) {
if (metricsTable == null)
return 0;
}
return metricsTable.readRequestCount + metricsTable.writeRequestCount;
else
return metricsTable.getTotalRequestsCount();
}
@Override
public long getMemstoreSize(String table) {
public long getMemstoresSize(String table) {
MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
if (metricsTable == null) {
if (metricsTable == null)
return 0;
}
return metricsTable.memstoreSize;
else
return metricsTable.getMemstoresSize();
}
@Override
public long getStoreFileSize(String table) {
public long getStoreFilesSize(String table) {
MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
if (metricsTable == null) {
if (metricsTable == null)
return 0;
}
return metricsTable.storeFileSize;
else
return metricsTable.getStoreFilesSize();
}
@Override
public long getTableSize(String table) {
MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
if (metricsTable == null) {
if (metricsTable == null)
return 0;
}
return metricsTable.memstoreSize + metricsTable.storeFileSize;
}
@Override
public long getNumRegions(String table) {
MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
if (metricsTable == null) {
return 0;
}
return metricsTable.regionCount;
}
@Override
public long getNumStores(String table) {
MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
if (metricsTable == null) {
return 0;
}
return metricsTable.storeCount;
}
@Override
public long getNumStoreFiles(String table) {
MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
if (metricsTable == null) {
return 0;
}
return metricsTable.storeFileCount;
}
@Override
public long getMaxStoreFileAge(String table) {
MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
if (metricsTable == null) {
return 0;
}
return metricsTable.maxStoreFileAge;
}
@Override
public long getMinStoreFileAge(String table) {
MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
if (metricsTable == null) {
return 0;
}
return metricsTable.minStoreFileAge == Long.MAX_VALUE ? 0 : metricsTable.minStoreFileAge;
}
@Override
public long getAvgStoreFileAge(String table) {
MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
if (metricsTable == null) {
return 0;
}
return metricsTable.storeFileCount == 0
? 0
: (metricsTable.totalStoreFileAge / metricsTable.storeFileCount);
}
@Override
public long getNumReferenceFiles(String table) {
MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
if (metricsTable == null) {
return 0;
}
return metricsTable.referenceFileCount;
}
@Override
public long getAvgRegionSize(String table) {
MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
if (metricsTable == null) {
return 0;
}
return metricsTable.regionCount == 0
? 0
: (metricsTable.memstoreSize + metricsTable.storeFileSize) / metricsTable.regionCount;
else
return metricsTable.getTableSize();
}
@Override
@@ -264,18 +165,61 @@ public class MetricsTableWrapperAggregateImpl implements MetricsTableWrapperAggr
}
private static class MetricsTableValues {
long readRequestCount;
long filteredReadRequestCount;
long writeRequestCount;
long memstoreSize;
long regionCount;
long storeCount;
long storeFileCount;
long storeFileSize;
long maxStoreFileAge;
long minStoreFileAge = Long.MAX_VALUE;
long totalStoreFileAge;
long referenceFileCount;
private long totalRequestsCount;
private long readRequestsCount;
private long writeRequestsCount;
private long memstoresSize;
private long storeFilesSize;
private long tableSize;
public long getTotalRequestsCount() {
return totalRequestsCount;
}
public void setTotalRequestsCount(long totalRequestsCount) {
this.totalRequestsCount = totalRequestsCount;
}
public long getReadRequestsCount() {
return readRequestsCount;
}
public void setReadRequestsCount(long readRequestsCount) {
this.readRequestsCount = readRequestsCount;
}
public long getWriteRequestsCount() {
return writeRequestsCount;
}
public void setWriteRequestsCount(long writeRequestsCount) {
this.writeRequestsCount = writeRequestsCount;
}
public long getMemstoresSize() {
return memstoresSize;
}
public void setMemstoresSize(long memstoresSize) {
this.memstoresSize = memstoresSize;
}
public long getStoreFilesSize() {
return storeFilesSize;
}
public void setStoreFilesSize(long storeFilesSize) {
this.storeFilesSize = storeFilesSize;
}
public long getTableSize() {
return tableSize;
}
public void setTableSize(long tableSize) {
this.tableSize = tableSize;
}
}
}

View File

@@ -157,9 +157,4 @@ public class MetricsRegionWrapperStub implements MetricsRegionWrapper {
public long getMaxFlushQueueSize() {
return 6;
}
@Override
public long getTotalRequestCount() {
return 0;
}
}

View File

@@ -27,12 +27,12 @@ public class MetricsTableWrapperStub implements MetricsTableWrapperAggregate {
}
@Override
public long getReadRequestCount(String table) {
public long getReadRequestsCount(String table) {
return 10;
}
@Override
public long getWriteRequestCount(String table) {
public long getWriteRequestsCount(String table) {
return 20;
}
@@ -42,17 +42,12 @@ public class MetricsTableWrapperStub implements MetricsTableWrapperAggregate {
}
@Override
public long getFilteredReadRequestCount(String table) {
return 40;
}
@Override
public long getMemstoreSize(String table) {
public long getMemstoresSize(String table) {
return 1000;
}
@Override
public long getStoreFileSize(String table) {
public long getStoreFilesSize(String table) {
return 2000;
}
@@ -64,44 +59,4 @@ public class MetricsTableWrapperStub implements MetricsTableWrapperAggregate {
public String getTableName() {
return tableName;
}
@Override
public long getNumRegions(String table) {
return 11;
}
@Override
public long getNumStores(String table) {
return 22;
}
@Override
public long getNumStoreFiles(String table) {
return 33;
}
@Override
public long getMaxStoreFileAge(String table) {
return 44;
}
@Override
public long getMinStoreFileAge(String table) {
return 55;
}
@Override
public long getAvgStoreFileAge(String table) {
return 66;
}
@Override
public long getNumReferenceFiles(String table) {
return 77;
}
@Override
public long getAvgRegionSize(String table) {
return 88;
}
}

View File

@@ -49,7 +49,7 @@ public class TestMetricsRegionServer {
@Before
public void setUp() {
wrapper = new MetricsRegionServerWrapperStub();
rsm = new MetricsRegionServer(wrapper, new Configuration(false), null);
rsm = new MetricsRegionServer(wrapper, new Configuration(false));
serverSource = rsm.getMetricsSource();
}
@@ -175,14 +175,14 @@
@Test
public void testFlush() {
rsm.updateFlush(null, 1, 2, 3);
rsm.updateFlush(1, 2, 3);
HELPER.assertCounter("flushTime_num_ops", 1, serverSource);
HELPER.assertCounter("flushMemstoreSize_num_ops", 1, serverSource);
HELPER.assertCounter("flushOutputSize_num_ops", 1, serverSource);
HELPER.assertCounter("flushedMemstoreBytes", 2, serverSource);
HELPER.assertCounter("flushedOutputBytes", 3, serverSource);
rsm.updateFlush(null, 10, 20, 30);
rsm.updateFlush(10, 20, 30);
HELPER.assertCounter("flushTimeNumOps", 2, serverSource);
HELPER.assertCounter("flushMemstoreSize_num_ops", 2, serverSource);
HELPER.assertCounter("flushOutputSize_num_ops", 2, serverSource);
@@ -192,7 +192,7 @@
@Test
public void testCompaction() {
rsm.updateCompaction(null, false, 1, 2, 3, 4, 5);
rsm.updateCompaction(false, 1, 2, 3, 4, 5);
HELPER.assertCounter("compactionTime_num_ops", 1, serverSource);
HELPER.assertCounter("compactionInputFileCount_num_ops", 1, serverSource);
HELPER.assertCounter("compactionInputSize_num_ops", 1, serverSource);
@@ -200,7 +200,7 @@
HELPER.assertCounter("compactedInputBytes", 4, serverSource);
HELPER.assertCounter("compactedoutputBytes", 5, serverSource);
rsm.updateCompaction(null, false, 10, 20, 30, 40, 50);
rsm.updateCompaction(false, 10, 20, 30, 40, 50);
HELPER.assertCounter("compactionTime_num_ops", 2, serverSource);
HELPER.assertCounter("compactionInputFileCount_num_ops", 2, serverSource);
HELPER.assertCounter("compactionInputSize_num_ops", 2, serverSource);
@@ -209,7 +209,7 @@
HELPER.assertCounter("compactedoutputBytes", 55, serverSource);
// do major compaction
rsm.updateCompaction(null, true, 100, 200, 300, 400, 500);
rsm.updateCompaction(true, 100, 200, 300, 400, 500);
HELPER.assertCounter("compactionTime_num_ops", 3, serverSource);
HELPER.assertCounter("compactionInputFileCount_num_ops", 3, serverSource);

View File

@@ -18,14 +18,11 @@
package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompatibilityFactory;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.test.MetricsAssertHelper;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -35,103 +32,21 @@ public class TestMetricsTableAggregate {
public static MetricsAssertHelper HELPER =
CompatibilityFactory.getInstance(MetricsAssertHelper.class);
private String tableName = "testTableMetrics";
private String pre = "Namespace_default_table_" + tableName + "_metric_";
private MetricsTableWrapperStub tableWrapper;
private MetricsTable mt;
private MetricsRegionServerWrapper rsWrapper;
private MetricsRegionServer rsm;
private MetricsTableAggregateSource agg;
@BeforeClass
public static void classSetUp() {
HELPER.init();
}
@Before
public void setUp() {
tableWrapper = new MetricsTableWrapperStub(tableName);
mt = new MetricsTable(tableWrapper);
rsWrapper = new MetricsRegionServerWrapperStub();
Configuration conf = new Configuration();
rsm = new MetricsRegionServer(rsWrapper, conf, mt);
agg = mt.getTableSourceAgg();
}
@Test
public void testRequestMetrics() throws IOException {
HELPER.assertCounter(pre + "readRequestCount", 10, agg);
HELPER.assertCounter(pre + "writeRequestCount", 20, agg);
HELPER.assertCounter(pre + "totalRequestCount", 30, agg);
}
public void testTableWrapperAggregateMetrics() throws IOException {
String tableName = "testTableMetrics";
MetricsTableWrapperStub tableWrapper = new MetricsTableWrapperStub(tableName);
CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)
.createTable(tableName, tableWrapper);
MetricsTableAggregateSource agg = CompatibilitySingletonFactory
.getInstance(MetricsRegionServerSourceFactory.class).getTableAggregate();
@Test
public void testRegionAndStoreMetrics() throws IOException {
HELPER.assertGauge(pre + "memstoreSize", 1000, agg);
HELPER.assertGauge(pre + "storeFileSize", 2000, agg);
HELPER.assertGauge(pre + "tableSize", 3000, agg);
HELPER.assertCounter("Namespace_default_table_testTableMetrics_metric_readRequestCount", 10, agg);
HELPER.assertCounter("Namespace_default_table_testTableMetrics_metric_writeRequestCount", 20, agg);
HELPER.assertCounter("Namespace_default_table_testTableMetrics_metric_totalRequestCount", 30, agg);
HELPER.assertGauge(pre + "regionCount", 11, agg);
HELPER.assertGauge(pre + "storeCount", 22, agg);
HELPER.assertGauge(pre + "storeFileCount", 33, agg);
HELPER.assertGauge(pre + "maxStoreFileAge", 44, agg);
HELPER.assertGauge(pre + "minStoreFileAge", 55, agg);
HELPER.assertGauge(pre + "avgStoreFileAge", 66, agg);
HELPER.assertGauge(pre + "numReferenceFiles", 77, agg);
HELPER.assertGauge(pre + "averageRegionSize", 88, agg);
}
@Test
public void testFlush() {
rsm.updateFlush(tableName, 1, 2, 3);
HELPER.assertCounter(pre + "flushTime_num_ops", 1, agg);
HELPER.assertCounter(pre + "flushMemstoreSize_num_ops", 1, agg);
HELPER.assertCounter(pre + "flushOutputSize_num_ops", 1, agg);
HELPER.assertCounter(pre + "flushedMemstoreBytes", 2, agg);
HELPER.assertCounter(pre + "flushedOutputBytes", 3, agg);
rsm.updateFlush(tableName, 10, 20, 30);
HELPER.assertCounter(pre + "flushTime_num_ops", 2, agg);
HELPER.assertCounter(pre + "flushMemstoreSize_num_ops", 2, agg);
HELPER.assertCounter(pre + "flushOutputSize_num_ops", 2, agg);
HELPER.assertCounter(pre + "flushedMemstoreBytes", 22, agg);
HELPER.assertCounter(pre + "flushedOutputBytes", 33, agg);
}
@Test
public void testCompaction() {
rsm.updateCompaction(tableName, false, 1, 2, 3, 4, 5);
HELPER.assertCounter(pre + "compactionTime_num_ops", 1, agg);
HELPER.assertCounter(pre + "compactionInputFileCount_num_ops", 1, agg);
HELPER.assertCounter(pre + "compactionInputSize_num_ops", 1, agg);
HELPER.assertCounter(pre + "compactionOutputFileCount_num_ops", 1, agg);
HELPER.assertCounter(pre + "compactedInputBytes", 4, agg);
HELPER.assertCounter(pre + "compactedoutputBytes", 5, agg);
rsm.updateCompaction(tableName, false, 10, 20, 30, 40, 50);
HELPER.assertCounter(pre + "compactionTime_num_ops", 2, agg);
HELPER.assertCounter(pre + "compactionInputFileCount_num_ops", 2, agg);
HELPER.assertCounter(pre + "compactionInputSize_num_ops", 2, agg);
HELPER.assertCounter(pre + "compactionOutputFileCount_num_ops", 2, agg);
HELPER.assertCounter(pre + "compactedInputBytes", 44, agg);
HELPER.assertCounter(pre + "compactedoutputBytes", 55, agg);
// do major compaction
rsm.updateCompaction(tableName, true, 100, 200, 300, 400, 500);
HELPER.assertCounter(pre + "compactionTime_num_ops", 3, agg);
HELPER.assertCounter(pre + "compactionInputFileCount_num_ops", 3, agg);
HELPER.assertCounter(pre + "compactionInputSize_num_ops", 3, agg);
HELPER.assertCounter(pre + "compactionOutputFileCount_num_ops", 3, agg);
HELPER.assertCounter(pre + "compactedInputBytes", 444, agg);
HELPER.assertCounter(pre + "compactedoutputBytes", 555, agg);
HELPER.assertCounter(pre + "majorCompactionTime_num_ops", 1, agg);
HELPER.assertCounter(pre + "majorCompactionInputFileCount_num_ops", 1, agg);
HELPER.assertCounter(pre + "majorCompactionInputSize_num_ops", 1, agg);
HELPER.assertCounter(pre + "majorCompactionOutputFileCount_num_ops", 1, agg);
HELPER.assertCounter(pre + "majorCompactedInputBytes", 400, agg);
HELPER.assertCounter(pre + "majorCompactedoutputBytes", 500, agg);
HELPER.assertGauge("Namespace_default_table_testTableMetrics_metric_memstoreSize", 1000, agg);
HELPER.assertGauge("Namespace_default_table_testTableMetrics_metric_storeFileSize", 2000, agg);
HELPER.assertGauge("Namespace_default_table_testTableMetrics_metric_tableSize", 3000, agg);
}
}