HBASE-6410 Move RegionServer Metrics to metrics2

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1406396 13f79535-47bb-0310-9956-ffa450edef68
eclark 2012-11-06 23:22:01 +00:00
parent 60caa2cedf
commit 3900978ffe
174 changed files with 3877 additions and 4749 deletions

View File

@@ -50,7 +50,7 @@
<!-- This is read by a thread from hadoop and findbugs never finds it -->
<Match>
<Bug code="UrF"/>
<Class name="org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl"/>
<Class name="org.apache.hadoop.hbase.metrics.BaseSourceImpl"/>
</Match>
<Match>

View File

@@ -44,6 +44,7 @@ public class CompatibilitySingletonFactory extends CompatibilityFactory {
*
* @return the singleton
*/
@SuppressWarnings("unchecked")
public static synchronized <T> T getInstance(Class<T> klass) {
T instance = (T) instances.get(klass);
if (instance == null) {

View File

@@ -16,29 +16,29 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.metrics;
package org.apache.hadoop.hbase.master;
import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
import org.apache.hadoop.hbase.metrics.BaseSource;
/**
* Interface that classes that expose metrics about the master will implement.
*/
public interface MasterMetricsSource extends BaseMetricsSource {
public interface MetricsMasterSource extends BaseSource {
/**
* The name of the metrics
*/
static final String METRICS_NAME = "HMaster";
static final String METRICS_NAME = "Server";
/**
* The context metrics will be under.
*/
static final String METRICS_CONTEXT = "hmaster";
static final String METRICS_CONTEXT = "master";
/**
* The name of the metrics context that metrics will be under in jmx
*/
static final String METRICS_JMX_CONTEXT = "HMaster";
static final String METRICS_JMX_CONTEXT = "Master,sub=" + METRICS_NAME;
/**
* Description
@@ -76,24 +76,28 @@ public interface MasterMetricsSource extends BaseMetricsSource {
/**
* Increment the number of requests the cluster has seen.
*
* @param inc Amount to increment the total by.
*/
void incRequests(final int inc);
/**
* Set the number of regions in transition.
*
* @param ritCount count of the regions in transition.
*/
void setRIT(int ritCount);
/**
* Set the count of the number of regions that have been in transition over the threshold time.
*
* @param ritCountOverThreshold number of regions in transition for longer than threshold.
*/
void setRITCountOverThreshold(int ritCountOverThreshold);
/**
* Set the oldest region in transition.
*
* @param age age of the oldest RIT.
*/
void setRITOldestAge(long age);

View File

@@ -16,13 +16,13 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.metrics;
package org.apache.hadoop.hbase.master;
/**
* Interface of a factory to create MasterMetricsSource when given a MasterMetricsWrapper
* Interface of a factory to create MetricsMasterSource when given a MetricsMasterWrapper
*/
public interface MasterMetricsSourceFactory {
public interface MetricsMasterSourceFactory {
MasterMetricsSource create(MasterMetricsWrapper beanWrapper);
MetricsMasterSource create(MetricsMasterWrapper masterWrapper);
}
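A sketch of the intended wiring, assuming a MetricsMasterWrapper implementation named masterWrapper; per MetricsMasterSourceFactoryImpl later in this diff, create() hands back a process-wide singleton:

MetricsMasterSourceFactory factory =
    CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class);
MetricsMasterSource source = factory.create(masterWrapper); // same instance on every call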

View File

@@ -16,13 +16,13 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.metrics;
package org.apache.hadoop.hbase.master;
/**
* This is the interface that will expose information to hadoop1/hadoop2 implementations of the
* MasterMetricsSource.
* MetricsMasterSource.
*/
public interface MasterMetricsWrapper {
public interface MetricsMasterWrapper {
/**
* Get ServerName
@@ -31,54 +31,63 @@ public interface MasterMetricsWrapper {
/**
* Get Average Load
*
* @return Average Load
*/
double getAverageLoad();
/**
* Get the Cluster ID
*
* @return Cluster ID
*/
String getClusterId();
/**
* Get the Zookeeper Quorum Info
*
* @return Zookeeper Quorum Info
*/
String getZookeeperQuorum();
/**
* Get the co-processors
*
* @return Co-processors
*/
String[] getCoprocessors();
/**
* Get hbase master start time
*
* @return Start time of master in milliseconds
*/
long getMasterStartTime();
long getStartTime();
/**
* Get the hbase master active time
*
* @return Time in milliseconds when master became active
*/
long getMasterActiveTime();
long getActiveTime();
/**
* Whether this master is the active master
*
* @return True if this is the active master
*/
boolean getIsActiveMaster();
/**
* Get the live region servers
*
* @return Live region servers
*/
int getRegionServers();
/**
* Get the dead region servers
*
* @return Dead region Servers
*/
int getDeadRegionServers();
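The wrapper lives on the master side and simply hands values over at snapshot time. A minimal illustrative implementation (all values hard-coded; getServerName() is assumed from the truncated hunk above):

public class ExampleMasterWrapper implements MetricsMasterWrapper {
  @Override public String getServerName() { return "master.example.org,60000,1352243000000"; }
  @Override public double getAverageLoad() { return 2.0; }
  @Override public String getClusterId() { return "example-cluster"; }
  @Override public String getZookeeperQuorum() { return "zk1.example.org:2181"; }
  @Override public String[] getCoprocessors() { return new String[0]; }
  @Override public long getStartTime() { return 1352243000000L; }
  @Override public long getActiveTime() { return 1352243100000L; }
  @Override public boolean getIsActiveMaster() { return true; }
  @Override public int getRegionServers() { return 5; }
  @Override public int getDeadRegionServers() { return 0; }
}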

View File

@@ -19,9 +19,11 @@
package org.apache.hadoop.hbase.metrics;
/**
* BaseMetricsSource for dynamic metrics to announce to Metrics2
* BaseSource for dynamic metrics to announce to Metrics2
*/
public interface BaseMetricsSource {
public interface BaseSource {
public static final String HBASE_METRICS_SYSTEM_NAME = "HBase";
/**
* Clear out the metrics and re-prepare the source.
@@ -53,11 +55,11 @@ public interface BaseMetricsSource {
void decGauge(String gaugeName, long delta);
/**
* Remove a gauge and no longer announce it.
* Remove a metric and no longer announce it.
*
* @param key Name of the gauge to remove.
*/
void removeGauge(String key);
void removeMetric(String key);
/**
* Add some amount to a counter.
@@ -84,12 +86,4 @@ public interface BaseMetricsSource {
*/
void updateQuantile(String name, long value);
/**
* Remove a counter and stop announcing it to metrics2.
*
* @param key
*/
void removeCounter(String key);
}
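With removeCounter gone, a single call now covers any named metric (sketch against the renamed API):

// Before this change: source.removeGauge(key) and source.removeCounter(key).
source.removeMetric("exampleGauge"); // now works for gauges and counters alike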

View File

@@ -0,0 +1,62 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.metrics.BaseSource;
/**
* This interface will be implemented by a MetricsSource that will export metrics from
* multiple regions into the hadoop metrics system.
*/
public interface MetricsRegionAggregateSource extends BaseSource {
/**
* The name of the metrics
*/
static final String METRICS_NAME = "Regions";
/**
* The name of the metrics context that metrics will be under.
*/
static final String METRICS_CONTEXT = "regionserver";
/**
* Description
*/
static final String METRICS_DESCRIPTION = "Metrics about HBase RegionServer regions and tables";
/**
* The name of the metrics context that metrics will be under in jmx
*/
static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
/**
* Register a MetricsRegionSource as being open.
*
* @param source the source for the region being opened.
*/
void register(MetricsRegionSource source);
/**
* Remove a region's source. This is called when a region is closed.
*
* @param source The region to remove.
*/
void deregister(MetricsRegionSource source);
}
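The aggregate is the one MetricsSource that Hadoop actually polls; per-region sources attach and detach as regions open and close. A hedged lifecycle sketch (both instances assumed to exist):

aggregate.register(regionSource);   // on region open
// ... region serves traffic; per-region counters accumulate ...
aggregate.deregister(regionSource); // on region close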

View File

@@ -0,0 +1,166 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.metrics.BaseSource;
/**
* Interface for classes that expose metrics about the regionserver.
*/
public interface MetricsRegionServerSource extends BaseSource {
/**
* The name of the metrics
*/
static final String METRICS_NAME = "Server";
/**
* The name of the metrics context that metrics will be under.
*/
static final String METRICS_CONTEXT = "regionserver";
/**
* Description
*/
static final String METRICS_DESCRIPTION = "Metrics about HBase RegionServer";
/**
* The name of the metrics context that metrics will be under in jmx
*/
static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
/**
* Update the Put time histogram
*
* @param t time it took
*/
void updatePut(long t);
/**
* Update the Delete time histogram
*
* @param t time it took
*/
void updateDelete(long t);
/**
* Update the Get time histogram .
*
* @param t time it took
*/
void updateGet(long t);
/**
* Update the Increment time histogram.
*
* @param t time it took
*/
void updateIncrement(long t);
/**
* Update the Append time histogram.
*
* @param t time it took
*/
void updateAppend(long t);
// Strings used for exporting to metrics system.
static final String REGION_COUNT = "regionCount";
static final String REGION_COUNT_DESC = "Number of regions";
static final String STORE_COUNT = "storeCount";
static final String STORE_COUNT_DESC = "Number of Stores";
static final String STOREFILE_COUNT = "storeFileCount";
static final String STOREFILE_COUNT_DESC = "Number of Store Files";
static final String MEMSTORE_SIZE = "memStoreSize";
static final String MEMSTORE_SIZE_DESC = "Size of the memstore";
static final String STOREFILE_SIZE = "storeFileSize";
static final String STOREFILE_SIZE_DESC = "Size of storefiles being served.";
static final String TOTAL_REQUEST_COUNT = "totalRequestCount";
static final String TOTAL_REQUEST_COUNT_DESC =
"Total number of requests this RegionServer has answered.";
static final String READ_REQUEST_COUNT = "readRequestCount";
static final String READ_REQUEST_COUNT_DESC =
"Number of read requests this region server has answered.";
static final String WRITE_REQUEST_COUNT = "writeRequestCount";
static final String WRITE_REQUEST_COUNT_DESC =
"Number of mutation requests this region server has answered.";
static final String CHECK_MUTATE_FAILED_COUNT = "checkMutateFailedCount";
static final String CHECK_MUTATE_FAILED_COUNT_DESC =
"Number of Check and Mutate calls that failed the checks.";
static final String CHECK_MUTATE_PASSED_COUNT = "checkMutatePassedCount";
static final String CHECK_MUTATE_PASSED_COUNT_DESC =
"Number of Check and Mutate calls that passed the checks.";
static final String STOREFILE_INDEX_SIZE = "storeFileIndexSize";
static final String STOREFILE_INDEX_SIZE_DESC = "Size of indexes in storefiles on disk.";
static final String STATIC_INDEX_SIZE = "staticIndexSize";
static final String STATIC_INDEX_SIZE_DESC = "Uncompressed size of the static indexes.";
static final String STATIC_BLOOM_SIZE = "staticBloomSize";
static final String STATIC_BLOOM_SIZE_DESC =
"Uncompressed size of the static bloom filters.";
static final String NUMBER_OF_PUTS_WITHOUT_WAL = "putsWithoutWALCount";
static final String NUMBER_OF_PUTS_WITHOUT_WAL_DESC =
"Number of mutations that have been sent by clients with the write ahead logging turned off.";
static final String DATA_SIZE_WITHOUT_WAL = "putsWithoutWALSize";
static final String DATA_SIZE_WITHOUT_WAL_DESC =
"Size of data that has been sent by clients with the write ahead logging turned off.";
static final String PERCENT_FILES_LOCAL = "percentFilesLocal";
static final String PERCENT_FILES_LOCAL_DESC =
"The percent of HFiles that are stored on the local hdfs data node.";
static final String COMPACTION_QUEUE_LENGTH = "compactionQueueLength";
static final String COMPACTION_QUEUE_LENGTH_DESC = "Length of the queue for compactions.";
static final String FLUSH_QUEUE_LENGTH = "flushQueueLength";
static final String FLUSH_QUEUE_LENGTH_DESC = "Length of the queue for region flushes";
static final String BLOCK_CACHE_FREE_SIZE = "blockCacheFreeSize";
static final String BLOCK_CACHE_FREE_DESC =
"Size of the block cache that is not occupied.";
static final String BLOCK_CACHE_COUNT = "blockCacheCount";
static final String BLOCK_CACHE_COUNT_DESC = "Number of block in the block cache.";
static final String BLOCK_CACHE_SIZE = "blockCacheSize";
static final String BLOCK_CACHE_SIZE_DESC = "Size of the block cache.";
static final String BLOCK_CACHE_HIT_COUNT = "blockCacheHitCount";
static final String BLOCK_CACHE_HIT_COUNT_DESC = "Count of the hit on the block cache.";
static final String BLOCK_CACHE_MISS_COUNT = "blockCacheMissCount";
static final String BLOCK_COUNT_MISS_COUNT_DESC =
"Number of requests for a block that missed the block cache.";
static final String BLOCK_CACHE_EVICTION_COUNT = "blockCacheEvictionCount";
static final String BLOCK_CACHE_EVICTION_COUNT_DESC =
"Count of the number of blocks evicted from the block cache.";
static final String BLOCK_CACHE_HIT_PERCENT = "blockCountHitPercent";
static final String BLOCK_CACHE_HIT_PERCENT_DESC =
"Percent of block cache requests that are hits";
static final String BLOCK_CACHE_EXPRESS_HIT_PERCENT = "blockCacheExpressHitPercent";
static final String BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC =
"The percent of the time that requests with the cache turned on hit the cache.";
static final String RS_START_TIME_NAME = "regionServerStartTime";
static final String ZOOKEEPER_QUORUM_NAME = "zookeeperQuorum";
static final String SERVER_NAME_NAME = "serverName";
static final String CLUSTER_ID_NAME = "clusterId";
static final String RS_START_TIME_DESC = "RegionServer Start Time";
static final String ZOOKEEPER_QUORUM_DESC = "Zookeeper Quorum";
static final String SERVER_NAME_DESC = "Server Name";
static final String CLUSTER_ID_DESC = "Cluster Id";
static final String UPDATES_BLOCKED_TIME = "updatesBlockedTime";
static final String UPDATES_BLOCKED_DESC =
"Number of MS updates have been blocked so that the memstore can be flushed.";
static final String DELETE_KEY = "delete";
static final String GET_KEY = "get";
static final String INCREMENT_KEY = "increment";
static final String PUT_KEY = "multiput";
static final String APPEND_KEY = "append";
}
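Each update* method feeds a latency histogram keyed by the *_KEY constants above. A hypothetical call site in the region server:

long start = System.currentTimeMillis();
region.put(put); // hypothetical operation being timed
serverSource.updatePut(System.currentTimeMillis() - start);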

View File

@@ -18,29 +18,24 @@
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* Interface of a factory to create Metrics Sources used inside of regionservers.
*/
public interface MetricsRegionServerSourceFactory {
/**
* This is the JMX management interface for HBase Region Server information
* Given a wrapper create a MetricsRegionServerSource.
*
* @param regionServerWrapper The wrapped region server
* @return a Metrics Source.
*/
@Evolving
public interface MXBean {
MetricsRegionServerSource createServer(MetricsRegionServerWrapper regionServerWrapper);
/**
* Return RegionServer's ServerName
* @return ServerName
* Create a MetricsRegionSource from a MetricsRegionWrapper.
*
* @param wrapper
* @return
*/
public String getServerName();
/**
* Get loaded co-processors
* @return Loaded Co-processors
*/
public String[] getCoprocessors();
/**
* Get Zookeeper Quorum
* @return Comma-separated list of Zookeeper Quorum servers
*/
public String getZookeeperQuorum();
MetricsRegionSource createRegion(MetricsRegionWrapper wrapper);
}
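A usage sketch for the reworked factory (both wrapper instances are hypothetical): createServer() returns the per-process server source, while createRegion() builds one source per region:

MetricsRegionServerSourceFactory factory =
    CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
MetricsRegionServerSource serverSource = factory.createServer(rsWrapper); // singleton
MetricsRegionSource regionSource = factory.createRegion(regionWrapper);   // one per region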

View File

@@ -0,0 +1,205 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
/**
* This is the interface that will expose RegionServer information to hadoop1/hadoop2
* implementations of the MetricsRegionServerSource.
*/
public interface MetricsRegionServerWrapper {
/**
* Get ServerName
*/
public String getServerName();
/**
* Get the Cluster ID
*
* @return Cluster ID
*/
public String getClusterId();
/**
* Get the Zookeeper Quorum Info
*
* @return Zookeeper Quorum Info
*/
public String getZookeeperQuorum();
/**
* Get the co-processors
*
* @return Co-processors
*/
public String getCoprocessors();
/**
* Get HRegionServer start time
*
* @return Start time of RegionServer in milliseconds
*/
public long getStartCode();
/**
* The number of online regions
*/
long getNumOnlineRegions();
/**
* Get the number of stores hosted on this region server.
*/
long getNumStores();
/**
* Get the number of store files hosted on this region server.
*/
long getNumStoreFiles();
/**
* Get the size of the memstore on this region server.
*/
long getMemstoreSize();
/**
* Get the total size of the store files this region server is serving from.
*/
long getStoreFileSize();
/**
* Get the number of requests per second.
*/
double getRequestsPerSecond();
/**
* Get the total number of requests per second.
*/
long getTotalRequestCount();
/**
* Get the number of read requests to regions hosted on this region server.
*/
long getReadRequestsCount();
/**
* Get the number of write requests to regions hosted on this region server.
*/
long getWriteRequestsCount();
/**
* Get the number of CAS operations that failed.
*/
long getCheckAndMutateChecksFailed();
/**
* Get the number of CAS operations that passed.
*/
long getCheckAndMutateChecksPassed();
/**
* Get the Size of indexes in storefiles on disk.
*/
long getStoreFileIndexSize();
/**
* Get the size of the static indexes including the roots.
*/
long getTotalStaticIndexSize();
/**
* Get the size of the static bloom filters.
*/
long getTotalStaticBloomSize();
/**
* Number of mutations received with WAL explicitly turned off.
*/
long getNumPutsWithoutWAL();
/**
* Amount of data in the memstore but not in the WAL because mutations explicitly had their
* WAL turned off.
*/
long getDataInMemoryWithoutWAL();
/**
* Get the percent of HFiles that are local.
*/
int getPercentFileLocal();
/**
* Get the size of the compaction queue
*/
int getCompactionQueueSize();
/**
* Get the size of the flush queue.
*/
int getFlushQueueSize();
/**
* Get the size of the block cache that is free.
*/
long getBlockCacheFreeSize();
/**
* Get the number of items in the block cache.
*/
long getBlockCacheCount();
/**
* Get the total size of the block cache.
*/
long getBlockCacheSize();
/**
* Get the count of hits to the block cache
*/
long getBlockCacheHitCount();
/**
* Get the count of misses to the block cache.
*/
long getBlockCacheMissCount();
/**
* Get the number of items evicted from the block cache.
*/
long getBlockCacheEvictedCount();
/**
* Get the percent of all requests that hit the block cache.
*/
int getBlockCacheHitPercent();
/**
* Get the percent of requests with the block cache turned on that hit the block cache.
*/
int getBlockCacheHitCachingPercent();
/**
* Force a re-computation of the metrics.
*/
void forceRecompute();
/**
* Get the amount of time that updates were blocked.
*/
long getUpdatesBlockedTime();
}

View File

@@ -0,0 +1,62 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
/**
* This interface will be implemented to allow single regions to push metrics into
* MetricsRegionAggregateSource that will in turn push data to the Hadoop metrics system.
*/
public interface MetricsRegionSource extends Comparable<MetricsRegionSource> {
/**
* Close the region's metrics as this region is closing.
*/
void close();
/**
* Update related counts of puts.
*/
void updatePut();
/**
* Update related counts of deletes.
*/
void updateDelete();
/**
* Update related counts of gets.
*/
void updateGet();
/**
* Update related counts of increments.
*/
void updateIncrement();
/**
* Update related counts of appends.
*/
void updateAppend();
/**
* Get the aggregate source to which this reports.
*/
MetricsRegionAggregateSource getAggregateSource();
}

View File

@@ -0,0 +1,71 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
/**
* Interface of class that will wrap an HRegion and export numbers so they can be
* used in MetricsRegionSource
*/
public interface MetricsRegionWrapper {
/**
* Get the name of the table the region belongs to.
*
* @return The string version of the table name.
*/
String getTableName();
/**
* Get the name of the region.
*
* @return The encoded name of the region.
*/
String getRegionName();
/**
* Get the number of stores hosted on this region server.
*/
long getNumStores();
/**
* Get the number of store files hosted on this region server.
*/
long getNumStoreFiles();
/**
* Get the size of the memstore on this region server.
*/
long getMemstoreSize();
/**
* Get the total size of the store files this region server is serving from.
*/
long getStoreFileSize();
/**
* Get the total number of read requests that have been issued against this region
*/
long getReadRequestCount();
/**
* Get the total number of mutations that have been issued against this region.
*/
long getWriteRequestCount();
}

View File

@@ -16,29 +16,29 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.replication.regionserver.metrics;
package org.apache.hadoop.hbase.replication.regionserver;
import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
import org.apache.hadoop.hbase.metrics.BaseSource;
/**
* Provides access to gauges and counters. Implementers will hide the details of hadoop1 or
* hadoop2's metrics2 classes and publishing.
*/
public interface ReplicationMetricsSource extends BaseMetricsSource {
public interface MetricsReplicationSource extends BaseSource {
/**
* The name of the metrics
*/
static final String METRICS_NAME = "ReplicationMetrics";
static final String METRICS_NAME = "Replication";
/**
* The name of the metrics context that metrics will be under.
*/
static final String METRICS_CONTEXT = "replicationmetrics";
static final String METRICS_CONTEXT = "regionserver";
/**
* The name of the metrics context that metrics will be under.
*/
static final String METRICS_JMX_CONTEXT = "ReplicationMetrics";
static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
/**
* A description.

View File

@@ -16,20 +16,20 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.rest.metrics;
package org.apache.hadoop.hbase.rest;
import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
import org.apache.hadoop.hbase.metrics.BaseSource;
/**
* Interface of the Metrics Source that will export data to Hadoop's Metrics2 system.
*/
public interface RESTMetricsSource extends BaseMetricsSource {
public interface MetricsRESTSource extends BaseSource {
public static String METRICS_NAME = "Rest";
public static String METRICS_NAME = "REST";
public static String CONTEXT = "rest";
public static String JMX_CONTEXT = "Rest";
public static String JMX_CONTEXT = "REST";
public static String METRICS_DESCRIPTION = "Metrics about the HBase REST server";
@@ -49,42 +49,49 @@ public interface RESTMetricsSource extends BaseMetricsSource {
/**
* Increment the number of requests
*
* @param inc Amount to increment by
*/
void incrementRequests(int inc);
/**
* Increment the number of successful Get requests.
*
* @param inc Number of successful get requests.
*/
void incrementSucessfulGetRequests(int inc);
/**
* Increment the number of successful Put requests.
*
* @param inc Number of successful put requests.
*/
void incrementSucessfulPutRequests(int inc);
/**
* Increment the number of successful Delete requests.
*
* @param inc
*/
void incrementSucessfulDeleteRequests(int inc);
/**
* Increment the number of failed Put Requests.
*
* @param inc Number of failed Put requests.
*/
void incrementFailedPutRequests(int inc);
/**
* Increment the number of failed Get requests.
*
* @param inc The number of failed Get Requests.
*/
void incrementFailedGetRequests(int inc);
/**
* Increment the number of failed Delete requests.
*
* @param inc The number of failed delete requests.
*/
void incrementFailedDeleteRequests(int inc);
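A hedged sketch of request accounting against this source (counts are illustrative; the Sucessful spelling is the interface's own):

restSource.incrementRequests(10);            // ten REST calls in this batch
restSource.incrementSucessfulGetRequests(8); // eight Gets succeeded
restSource.incrementFailedGetRequests(2);    // two Gets failed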

View File

@@ -16,14 +16,14 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.thrift.metrics;
package org.apache.hadoop.hbase.thrift;
import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
import org.apache.hadoop.hbase.metrics.BaseSource;
/**
* Interface of a class that will export metrics about Thrift to hadoop's metrics2.
*/
public interface ThriftServerMetricsSource extends BaseMetricsSource {
public interface MetricsThriftServerSource extends BaseSource {
static final String BATCH_GET_KEY = "batchGet";
static final String BATCH_MUTATE_KEY = "batchMutate";

View File

@@ -16,10 +16,10 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.thrift.metrics;
package org.apache.hadoop.hbase.thrift;
/** Factory that will be used to create metrics sources for the two different types of thrift servers. */
public interface ThriftServerMetricsSourceFactory {
public interface MetricsThriftServerSourceFactory {
static final String METRICS_NAME = "Thrift";
static final String METRICS_DESCRIPTION = "Thrift Server Metrics";
@@ -28,8 +28,10 @@ public interface ThriftServerMetricsSourceFactory {
static final String THRIFT_TWO_METRICS_CONTEXT = "thrift-two";
static final String THRIFT_TWO_JMX_CONTEXT = "Thrift,sub=ThriftTwo";
ThriftServerMetricsSource createThriftOneSource();
/** Create a Source for a thrift one server */
MetricsThriftServerSource createThriftOneSource();
ThriftServerMetricsSource createThriftTwoSource();
/** Create a Source for a thrift two server */
MetricsThriftServerSource createThriftTwoSource();
}
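A sketch of selecting the source for each server type; the factory is again resolved through CompatibilitySingletonFactory, and the two sources land in separate metrics contexts:

MetricsThriftServerSourceFactory factory =
    CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class);
MetricsThriftServerSource one = factory.createThriftOneSource(); // for ThriftServer
MetricsThriftServerSource two = factory.createThriftTwoSource(); // for ThriftServer2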

View File

@@ -16,13 +16,15 @@
* limitations under the License.
*/
package org.apache.hadoop.metrics;
package org.apache.hadoop.metrics2;
/**
*
* Metrics Histogram interface. Implementing classes will expose computed
* quartile values through the metrics system.
*/
public interface MetricHistogram {
//Strings used to create metrics names.
static final String NUM_OPS_METRIC_NAME = "_num_ops";
static final String MIN_METRIC_NAME = "_min";
static final String MAX_METRIC_NAME = "_max";
@@ -32,6 +34,10 @@ public interface MetricHistogram {
static final String NINETY_FIFTH_PERCENTILE_METRIC_NAME = "_95th_percentile";
static final String NINETY_NINETH_PERCENTILE_METRIC_NAME = "_99th_percentile";
/**
* Add a single value to a histogram's stream of values.
* @param value
*/
void add(long value);
}
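The suffix constants describe how a single histogram fans out into several exported values. A sketch from inside a source (the registry call mirrors the getHistogram usage later in this diff; the "put" name is illustrative):

MetricHistogram putHistogram = getMetricsRegistry().getHistogram("put");
putHistogram.add(latencyMillis); // exports put_num_ops, put_min, put_max, put_95th_percentile, ...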

View File

@@ -16,12 +16,12 @@
* limitations under the License.
*/
package org.apache.hadoop.metrics;
package org.apache.hadoop.metrics2;
import java.util.concurrent.ScheduledExecutorService;
/**
*
* ScheduledExecutorService for metrics.
*/
public interface MetricsExecutor {

View File

@@ -16,20 +16,21 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.metrics;
package org.apache.hadoop.hbase.master;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.master.MetricsMasterSource;
import org.junit.Test;
/**
* Test for the CompatibilitySingletonFactory and building MasterMetricsSource
* Test for the CompatibilitySingletonFactory and building MetricsMasterSource
*/
public class TestMasterMetricsSourceFactory {
public class TestMetricsMasterSourceFactory {
@Test(expected=RuntimeException.class)
public void testGetInstanceNoHadoopCompat() throws Exception {
//This should throw an exception because there is no compat lib on the class path.
CompatibilitySingletonFactory.getInstance(MasterMetricsSource.class);
CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class);
}
}

View File

@@ -0,0 +1,36 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory;
import org.junit.Test;
/**
* Test for the CompatibilitySingletonFactory and building MetricsRegionServerSource
*/
public class TestMetricsRegionServerSourceFactory {
@Test(expected=RuntimeException.class)
public void testGetInstanceNoHadoopCompat() throws Exception {
//This should throw an exception because there is no compat lib on the class path.
CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
}
}

View File

@@ -16,19 +16,20 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.replication.regionserver.metrics;
package org.apache.hadoop.hbase.replication.regionserver;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource;
import org.junit.Test;
/**
* Test for the CompatibilitySingletonFactory and building ReplicationMetricsSource
* Test for the CompatibilitySingletonFactory and building MetricsReplicationSource
*/
public class TestReplicationMetricsSourceFactory {
public class TestMetricsReplicationSourceFactory {
@Test(expected=RuntimeException.class)
public void testGetInstanceNoHadoopCompat() throws Exception {
//This should throw an exception because there is no compat lib on the class path.
CompatibilitySingletonFactory.getInstance(ReplicationMetricsSource.class);
CompatibilitySingletonFactory.getInstance(MetricsReplicationSource.class);
}
}

View File

@@ -16,21 +16,22 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.rest.metrics;
package org.apache.hadoop.hbase.rest;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.rest.MetricsRESTSource;
import org.junit.Test;
/**
* Test of Rest Metrics Source interface.
*/
public class TestRESTMetricsSource {
public class TestMetricsRESTSource {
@Test(expected=RuntimeException.class)
public void testGetInstanceNoHadoopCompat() throws Exception {
//This should throw an exception because there is no compat lib on the class path.
CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class);
CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class);
}
}

View File

@@ -18,7 +18,7 @@
package org.apache.hadoop.hbase.test;
import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
import org.apache.hadoop.hbase.metrics.BaseSource;
/** Interface of a class to make assertions about metrics values. */
public interface MetricsAssertHelper {
@@ -28,128 +28,128 @@ public interface MetricsAssertHelper {
*
* @param name The name of the tag.
* @param expected The expected value
* @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
public void assertTag(String name, String expected, BaseMetricsSource source);
public void assertTag(String name, String expected, BaseSource source);
/**
* Assert that a gauge exists and that its value is equal to the expected value.
*
* @param name The name of the gauge
* @param expected The expected value of the gauge.
* @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
public void assertGauge(String name, long expected, BaseMetricsSource source);
public void assertGauge(String name, long expected, BaseSource source);
/**
* Assert that a gauge exists and its value is greater than a given value
*
* @param name The name of the gauge
* @param expected Value that the gauge is expected to be greater than
* @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
public void assertGaugeGt(String name, long expected, BaseMetricsSource source);
public void assertGaugeGt(String name, long expected, BaseSource source);
/**
* Assert that a gauge exists and its value is less than a given value
*
* @param name The name of the gauge
* @param expected Value that the gauge is expected to be less than
* @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
public void assertGaugeLt(String name, long expected, BaseMetricsSource source);
public void assertGaugeLt(String name, long expected, BaseSource source);
/**
* Assert that a gauge exists and that its value is equal to the expected value.
*
* @param name The name of the gauge
* @param expected The expected value of the gauge.
* @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
public void assertGauge(String name, double expected, BaseMetricsSource source);
public void assertGauge(String name, double expected, BaseSource source);
/**
* Assert that a gauge exists and its value is greater than a given value
*
* @param name The name of the gauge
* @param expected Value that the gauge is expected to be greater than
* @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
public void assertGaugeGt(String name, double expected, BaseMetricsSource source);
public void assertGaugeGt(String name, double expected, BaseSource source);
/**
* Assert that a gauge exists and its value is less than a given value
*
* @param name The name of the gauge
* @param expected Value that the gauge is expected to be less than
* @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
public void assertGaugeLt(String name, double expected, BaseMetricsSource source);
public void assertGaugeLt(String name, double expected, BaseSource source);
/**
* Assert that a counter exists and that its value is equal to the expected value.
*
* @param name The name of the counter.
* @param expected The expected value
* @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
public void assertCounter(String name, long expected, BaseMetricsSource source);
public void assertCounter(String name, long expected, BaseSource source);
/**
* Assert that a counter exists and that its value is greater than the given value.
*
* @param name The name of the counter.
* @param expected The value the counter is expected to be greater than.
* @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
public void assertCounterGt(String name, long expected, BaseMetricsSource source);
public void assertCounterGt(String name, long expected, BaseSource source);
/**
* Assert that a counter exists and that its value is less than the given value.
*
* @param name The name of the counter.
* @param expected The value the counter is expected to be less than.
* @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
public void assertCounterLt(String name, long expected, BaseMetricsSource source);
public void assertCounterLt(String name, long expected, BaseSource source);
/**
* Get the value of a counter.
*
* @param name name of the counter.
* @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
* @return long value of the counter.
*/
public long getCounter(String name, BaseMetricsSource source);
public long getCounter(String name, BaseSource source);
/**
* Get the value of a gauge as a double.
*
* @param name name of the gauge.
* @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
* @return double value of the gauge.
*/
public double getGaugeDouble(String name, BaseMetricsSource source);
public double getGaugeDouble(String name, BaseSource source);
/**
* Get the value of a gauge as a long.
*
* @param name name of the gauge.
* @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
* @return long value of the gauge.
*/
public long getGaugeLong(String name, BaseMetricsSource source);
public long getGaugeLong(String name, BaseSource source);
}

View File

@@ -16,21 +16,22 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.thrift.metrics;
package org.apache.hadoop.hbase.thrift;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory;
import org.junit.Test;
/**
* Test for the interface of ThriftServerMetricsSourceFactory
* Test for the interface of MetricsThriftServerSourceFactory
*/
public class TestThriftServerMetricsSourceFactory {
public class TestMetricsThriftServerSourceFactory {
@Test(expected=RuntimeException.class)
public void testGetInstanceNoHadoopCompat() throws RuntimeException {
//This should throw an exception because there is no compat lib on the class path.
CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class);
CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class);
}
}

View File

@@ -97,6 +97,10 @@ limitations under the License.
<groupId>com.yammer.metrics</groupId>
<artifactId>metrics-core</artifactId>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-test</artifactId>

View File

@@ -16,22 +16,22 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.metrics;
package org.apache.hadoop.hbase.master;
/**
* Factory to create MasterMetricsSource when given a MasterMetricsWrapper
* Factory to create MetricsMasterSource when given a MetricsMasterWrapper
*/
public class MasterMetricsSourceFactoryImpl implements MasterMetricsSourceFactory {
public class MetricsMasterSourceFactoryImpl implements MetricsMasterSourceFactory {
private static enum FactoryStorage {
INSTANCE;
MasterMetricsSource source;
MetricsMasterSource masterSource;
}
@Override
public synchronized MasterMetricsSource create(MasterMetricsWrapper beanWrapper) {
if (FactoryStorage.INSTANCE.source == null ) {
FactoryStorage.INSTANCE.source = new MasterMetricsSourceImpl(beanWrapper);
public synchronized MetricsMasterSource create(MetricsMasterWrapper masterWrapper) {
if (FactoryStorage.INSTANCE.masterSource == null) {
FactoryStorage.INSTANCE.masterSource = new MetricsMasterSourceImpl(masterWrapper);
}
return FactoryStorage.INSTANCE.source;
return FactoryStorage.INSTANCE.masterSource;
}
}

View File

@@ -16,41 +16,42 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.metrics;
package org.apache.hadoop.hbase.master;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricsBuilder;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong;
import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;
/** Hadoop1 implementation of MasterMetricsSource. */
public class MasterMetricsSourceImpl
extends BaseMetricsSourceImpl implements MasterMetricsSource {
/**
* Hadoop1 implementation of MetricsMasterSource.
*/
public class MetricsMasterSourceImpl
extends BaseSourceImpl implements MetricsMasterSource {
private static final Log LOG = LogFactory.getLog(MasterMetricsSourceImpl.class.getName());
private static final Log LOG = LogFactory.getLog(MetricsMasterSourceImpl.class.getName());
MetricMutableCounterLong clusterRequestsCounter;
MetricMutableGaugeLong ritGauge;
MetricMutableGaugeLong ritCountOverThresholdGauge;
MetricMutableGaugeLong ritOldestAgeGauge;
private final MasterMetricsWrapper masterWrapper;
private final MetricsMasterWrapper masterWrapper;
private MetricMutableCounterLong clusterRequestsCounter;
private MetricMutableGaugeLong ritGauge;
private MetricMutableGaugeLong ritCountOverThresholdGauge;
private MetricMutableGaugeLong ritOldestAgeGauge;
private MetricMutableHistogram splitTimeHisto;
private MetricMutableHistogram splitSizeHisto;
public MasterMetricsSourceImpl(MasterMetricsWrapper masterWrapper) {
public MetricsMasterSourceImpl(MetricsMasterWrapper masterWrapper) {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, masterWrapper);
}
public MasterMetricsSourceImpl(String metricsName,
public MetricsMasterSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext,
MasterMetricsWrapper masterWrapper) {
MetricsMasterWrapper masterWrapper) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
this.masterWrapper = masterWrapper;
}
@@ -108,9 +109,9 @@ public class MasterMetricsSourceImpl
if (masterWrapper != null) {
metricsRecordBuilder
.addGauge(MASTER_ACTIVE_TIME_NAME,
MASTER_ACTIVE_TIME_DESC, masterWrapper.getMasterActiveTime())
MASTER_ACTIVE_TIME_DESC, masterWrapper.getActiveTime())
.addGauge(MASTER_START_TIME_NAME,
MASTER_START_TIME_DESC, masterWrapper.getMasterStartTime())
MASTER_START_TIME_DESC, masterWrapper.getStartTime())
.addGauge(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC, masterWrapper.getAverageLoad())
.addGauge(NUM_REGION_SERVERS_NAME,
NUMBER_OF_REGION_SERVERS_DESC, masterWrapper.getRegionServers())
@@ -125,7 +126,7 @@ public class MasterMetricsSourceImpl
String.valueOf(masterWrapper.getIsActiveMaster()));
}
metricsRegistry.snapshot(metricsRecordBuilder, true);
metricsRegistry.snapshot(metricsRecordBuilder, all);
}
}

View File

@@ -19,19 +19,16 @@
package org.apache.hadoop.hbase.metrics;
import org.apache.hadoop.metrics2.MetricsBuilder;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong;
import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;
import org.apache.hadoop.metrics2.lib.MetricMutableQuantiles;
import org.apache.hadoop.metrics2.impl.JmxCacheBuster;
import org.apache.hadoop.metrics2.lib.*;
import org.apache.hadoop.metrics2.source.JvmMetricsSource;
/**
* Hadoop 1 implementation of BaseMetricsSource (using metrics2 framework)
* Hadoop 1 implementation of BaseSource (using metrics2 framework)
*/
public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource {
public class BaseSourceImpl implements BaseSource, MetricsSource {
private static enum DefaultMetricsSystemInitializer {
INSTANCE;
@@ -46,8 +43,6 @@ public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource {
}
}
private static boolean defaultMetricsSystemInited = false;
public static final String HBASE_METRICS_SYSTEM_NAME = "hbase";
protected final DynamicMetricsRegistry metricsRegistry;
protected final String metricsName;
@@ -55,7 +50,7 @@ public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource {
protected final String metricsContext;
protected final String metricsJmxContext;
public BaseMetricsSourceImpl(
public BaseSourceImpl(
String metricsName,
String metricsDescription,
String metricsContext,
@@ -137,22 +132,15 @@ public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource {
}
/**
* Remove a named gauge.
* Remove a named metric.
*
* @param key
*/
public void removeGauge(String key) {
public void removeMetric(String key) {
metricsRegistry.removeMetric(key);
JmxCacheBuster.clearJmxCache();
}
/**
* Remove a named counter.
*
* @param key
*/
public void removeCounter(String key) {
metricsRegistry.removeMetric(key);
}
/**
* Method to export all the metrics.
@@ -162,14 +150,16 @@ public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource {
*/
@Override
public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
metricsRegistry.snapshot(metricsBuilder.addRecord(metricsRegistry.name()), all);
MetricsRecordBuilder mrb = metricsBuilder.addRecord(metricsName)
.setContext(metricsContext);
metricsRegistry.snapshot(mrb, all);
}
/**
* Used to get at the DynamicMetricsRegistry.
* @return DynamicMetricsRegistry
*/
protected DynamicMetricsRegistry getMetricsRegistry() {
public DynamicMetricsRegistry getMetricsRegistry() {
return metricsRegistry;
}
}
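Concrete sources extend this class and lean on the inherited registry helpers. A minimal subclass sketch (incCounters is assumed to be the counter-side counterpart of the decGauge method shown in the BaseSource hunk above):

public class ExampleSourceImpl extends BaseSourceImpl {
  public ExampleSourceImpl() {
    super("Example", "An example metrics source", "example", "Example");
  }
  public void recordEvent() {
    incCounters("eventCount", 1); // assumed helper name; see lead-in
  }
}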

View File

@@ -0,0 +1,82 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricsBuilder;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import java.util.TreeSet;
public class MetricsRegionAggregateSourceImpl extends BaseSourceImpl
implements MetricsRegionAggregateSource {
private final Log LOG = LogFactory.getLog(this.getClass());
private final TreeSet<MetricsRegionSourceImpl> regionSources =
new TreeSet<MetricsRegionSourceImpl>();
public MetricsRegionAggregateSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
}
public MetricsRegionAggregateSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
}
@Override
public void register(MetricsRegionSource source) {
regionSources.add((MetricsRegionSourceImpl) source);
}
@Override
public void deregister(MetricsRegionSource source) {
regionSources.remove(source);
}
/**
* Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all
* expectations of java programmers. Instead of returning anything Hadoop metrics expects
* getMetrics to push the metrics into the metricsBuilder.
*
* @param metricsBuilder Builder to accept metrics
* @param all push all or only changed?
*/
@Override
public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
MetricsRecordBuilder mrb = metricsBuilder.addRecord(metricsName)
.setContext(metricsContext);
if (regionSources != null) {
for (MetricsRegionSourceImpl regionMetricSource : regionSources) {
regionMetricSource.snapshot(mrb, all);
}
}
metricsRegistry.snapshot(mrb, all);
}
}

View File

@@ -0,0 +1,52 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
/**
* Factory to create MetricsRegionServerSource when given a MetricsRegionServerWrapper
*/
public class MetricsRegionServerSourceFactoryImpl implements MetricsRegionServerSourceFactory {
private static enum FactoryStorage {
INSTANCE;
private MetricsRegionServerSource serverSource;
private MetricsRegionAggregateSourceImpl aggImpl;
}
private synchronized MetricsRegionAggregateSourceImpl getAggregate() {
if (FactoryStorage.INSTANCE.aggImpl == null) {
FactoryStorage.INSTANCE.aggImpl = new MetricsRegionAggregateSourceImpl();
}
return FactoryStorage.INSTANCE.aggImpl;
}
@Override
public synchronized MetricsRegionServerSource createServer(MetricsRegionServerWrapper regionServerWrapper) {
if (FactoryStorage.INSTANCE.serverSource == null) {
FactoryStorage.INSTANCE.serverSource = new MetricsRegionServerSourceImpl(
regionServerWrapper);
}
return FactoryStorage.INSTANCE.serverSource;
}
@Override
public MetricsRegionSource createRegion(MetricsRegionWrapper wrapper) {
return new MetricsRegionSourceImpl(wrapper, getAggregate());
}
}

View File

@@ -0,0 +1,161 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.MetricsBuilder;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
/**
* Hadoop1 implementation of MetricsRegionServerSource.
*/
public class MetricsRegionServerSourceImpl
extends BaseSourceImpl implements MetricsRegionServerSource {
final MetricsRegionServerWrapper rsWrap;
private final MetricHistogram putHisto;
private final MetricHistogram deleteHisto;
private final MetricHistogram getHisto;
private final MetricHistogram incrementHisto;
private final MetricHistogram appendHisto;
public MetricsRegionServerSourceImpl(MetricsRegionServerWrapper rsWrap) {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, rsWrap);
}
public MetricsRegionServerSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext,
MetricsRegionServerWrapper rsWrap) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
this.rsWrap = rsWrap;
putHisto = getMetricsRegistry().getHistogram(PUT_KEY);
deleteHisto = getMetricsRegistry().getHistogram(DELETE_KEY);
getHisto = getMetricsRegistry().getHistogram(GET_KEY);
incrementHisto = getMetricsRegistry().getHistogram(INCREMENT_KEY);
appendHisto = getMetricsRegistry().getHistogram(APPEND_KEY);
}
@Override
public void init() {
super.init();
}
@Override
public void updatePut(long t) {
putHisto.add(t);
}
@Override
public void updateDelete(long t) {
deleteHisto.add(t);
}
@Override
public void updateGet(long t) {
getHisto.add(t);
}
@Override
public void updateIncrement(long t) {
incrementHisto.add(t);
}
@Override
public void updateAppend(long t) {
appendHisto.add(t);
}
/**
* Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all
* expectations of java programmers. Instead of returning anything Hadoop metrics expects
* getMetrics to push the metrics into the metricsBuilder.
*
* @param metricsBuilder Builder to accept metrics
* @param all push all or only changed?
*/
@Override
public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
MetricsRecordBuilder mrb = metricsBuilder.addRecord(metricsName)
.setContext(metricsContext);
// rsWrap can be null because this function is called inside of init.
if (rsWrap != null) {
mrb.addGauge(REGION_COUNT, REGION_COUNT_DESC, rsWrap.getNumOnlineRegions())
.addGauge(STORE_COUNT, STORE_COUNT_DESC, rsWrap.getNumStores())
.addGauge(STOREFILE_COUNT, STOREFILE_COUNT_DESC, rsWrap.getNumStoreFiles())
.addGauge(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC, rsWrap.getMemstoreSize())
.addGauge(STOREFILE_SIZE, STOREFILE_SIZE_DESC, rsWrap.getStoreFileSize())
.addGauge(RS_START_TIME_NAME, RS_START_TIME_DESC, rsWrap.getStartCode())
.addCounter(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC, rsWrap.getTotalRequestCount())
.addCounter(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC, rsWrap.getReadRequestsCount())
.addCounter(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC, rsWrap.getWriteRequestsCount())
.addCounter(CHECK_MUTATE_FAILED_COUNT,
CHECK_MUTATE_FAILED_COUNT_DESC,
rsWrap.getCheckAndMutateChecksFailed())
.addCounter(CHECK_MUTATE_PASSED_COUNT,
CHECK_MUTATE_PASSED_COUNT_DESC,
rsWrap.getCheckAndMutateChecksPassed())
.addGauge(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC, rsWrap.getStoreFileIndexSize())
.addGauge(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC, rsWrap.getTotalStaticIndexSize())
.addGauge(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC, rsWrap.getTotalStaticBloomSize())
.addGauge(NUMBER_OF_PUTS_WITHOUT_WAL,
NUMBER_OF_PUTS_WITHOUT_WAL_DESC,
rsWrap.getNumPutsWithoutWAL())
.addGauge(DATA_SIZE_WITHOUT_WAL,
DATA_SIZE_WITHOUT_WAL_DESC,
rsWrap.getDataInMemoryWithoutWAL())
.addGauge(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC, rsWrap.getPercentFileLocal())
.addGauge(COMPACTION_QUEUE_LENGTH,
COMPACTION_QUEUE_LENGTH_DESC,
rsWrap.getCompactionQueueSize())
.addGauge(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC, rsWrap.getFlushQueueSize())
.addGauge(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC, rsWrap.getBlockCacheFreeSize())
.addGauge(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC, rsWrap.getBlockCacheCount())
.addGauge(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC, rsWrap.getBlockCacheSize())
.addCounter(BLOCK_CACHE_HIT_COUNT,
BLOCK_CACHE_HIT_COUNT_DESC,
rsWrap.getBlockCacheHitCount())
.addCounter(BLOCK_CACHE_MISS_COUNT,
BLOCK_COUNT_MISS_COUNT_DESC,
rsWrap.getBlockCacheMissCount())
.addCounter(BLOCK_CACHE_EVICTION_COUNT,
BLOCK_CACHE_EVICTION_COUNT_DESC,
rsWrap.getBlockCacheEvictedCount())
.addGauge(BLOCK_CACHE_HIT_PERCENT,
BLOCK_CACHE_HIT_PERCENT_DESC,
rsWrap.getBlockCacheHitPercent())
.addGauge(BLOCK_CACHE_EXPRESS_HIT_PERCENT,
BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC,
rsWrap.getBlockCacheHitCachingPercent())
.addCounter(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC, rsWrap.getUpdatesBlockedTime())
.tag(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC, rsWrap.getZookeeperQuorum())
.tag(SERVER_NAME_NAME, SERVER_NAME_DESC, rsWrap.getServerName())
.tag(CLUSTER_ID_NAME, CLUSTER_ID_DESC, rsWrap.getClusterId());
}
metricsRegistry.snapshot(mrb, all);
}
}
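For a sense of how this source is driven, here is a minimal caller-side sketch. It assumes a MetricsRegionServerWrapper implementation named rsWrap and a hypothetical doPut() operation; neither is part of this patch.
// Hypothetical sketch: time an operation and feed the latency into the
// put histogram exposed by MetricsRegionServerSource.
MetricsRegionServerSource serverSource = CompatibilitySingletonFactory
    .getInstance(MetricsRegionServerSourceFactory.class)
    .createServer(rsWrap);           // rsWrap: some MetricsRegionServerWrapper
long start = System.currentTimeMillis();
doPut();                             // hypothetical write being measured
serverSource.updatePut(System.currentTimeMillis() - start);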

View File

@ -0,0 +1,163 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.impl.JmxCacheBuster;
import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
public class MetricsRegionSourceImpl implements MetricsRegionSource {
private final MetricsRegionWrapper regionWrapper;
private boolean closed = false;
private MetricsRegionAggregateSourceImpl agg;
private DynamicMetricsRegistry registry;
private static final Log LOG = LogFactory.getLog(MetricsRegionSourceImpl.class);
private String regionNamePrefix;
private String regionPutKey;
private String regionDeleteKey;
private String regionGetKey;
private String regionIncrementKey;
private String regionAppendKey;
private MetricMutableCounterLong regionPut;
private MetricMutableCounterLong regionDelete;
private MetricMutableCounterLong regionGet;
private MetricMutableCounterLong regionIncrement;
private MetricMutableCounterLong regionAppend;
public MetricsRegionSourceImpl(MetricsRegionWrapper regionWrapper,
MetricsRegionAggregateSourceImpl aggregate) {
this.regionWrapper = regionWrapper;
agg = aggregate;
agg.register(this);
LOG.debug("Creating new MetricsRegionSourceImpl for table " +
regionWrapper.getTableName() +
" " +
regionWrapper.getRegionName());
registry = agg.getMetricsRegistry();
regionNamePrefix = "table." + regionWrapper.getTableName() + "."
+ "region." + regionWrapper.getRegionName() + ".";
String suffix = "Count";
regionPutKey = regionNamePrefix + MetricsRegionServerSource.PUT_KEY + suffix;
regionPut = registry.getLongCounter(regionPutKey, 0L);
regionDeleteKey = regionNamePrefix + MetricsRegionServerSource.DELETE_KEY + suffix;
regionDelete = registry.getLongCounter(regionDeleteKey, 0L);
regionGetKey = regionNamePrefix + MetricsRegionServerSource.GET_KEY + suffix;
regionGet = registry.getLongCounter(regionGetKey, 0L);
regionIncrementKey = regionNamePrefix + MetricsRegionServerSource.INCREMENT_KEY + suffix;
regionIncrement = registry.getLongCounter(regionIncrementKey, 0L);
regionAppendKey = regionNamePrefix + MetricsRegionServerSource.APPEND_KEY + suffix;
regionAppend = registry.getLongCounter(regionAppendKey, 0L);
}
@Override
public void close() {
closed = true;
agg.deregister(this);
LOG.trace("Removing region Metrics: " + regionWrapper.getRegionName());
registry.removeMetric(regionPutKey);
registry.removeMetric(regionDeleteKey);
registry.removeMetric(regionGetKey);
registry.removeMetric(regionIncrementKey);
registry.removeMetric(regionAppendKey);
JmxCacheBuster.clearJmxCache();
}
@Override
public void updatePut() {
regionPut.incr();
}
@Override
public void updateDelete() {
regionDelete.incr();
}
@Override
public void updateGet() {
regionGet.incr();
}
@Override
public void updateIncrement() {
regionIncrement.incr();
}
@Override
public void updateAppend() {
regionAppend.incr();
}
@Override
public MetricsRegionAggregateSource getAggregateSource() {
return agg;
}
@Override
public int compareTo(MetricsRegionSource source) {
if (!(source instanceof MetricsRegionSourceImpl))
return -1;
MetricsRegionSourceImpl impl = (MetricsRegionSourceImpl) source;
return this.regionWrapper.getRegionName()
.compareTo(impl.regionWrapper.getRegionName());
}
void snapshot(MetricsRecordBuilder mrb, boolean ignored) {
if (closed) return;
mrb.addGauge(regionNamePrefix + MetricsRegionServerSource.STORE_COUNT,
MetricsRegionServerSource.STORE_COUNT_DESC,
this.regionWrapper.getNumStores());
mrb.addGauge(regionNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT,
MetricsRegionServerSource.STOREFILE_COUNT_DESC,
this.regionWrapper.getNumStoreFiles());
mrb.addGauge(regionNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE,
MetricsRegionServerSource.MEMSTORE_SIZE_DESC,
this.regionWrapper.getMemstoreSize());
mrb.addGauge(regionNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE,
MetricsRegionServerSource.STOREFILE_SIZE_DESC,
this.regionWrapper.getStoreFileSize());
mrb.addCounter(regionNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT,
MetricsRegionServerSource.READ_REQUEST_COUNT_DESC,
this.regionWrapper.getReadRequestCount());
mrb.addCounter(regionNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT,
MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC,
this.regionWrapper.getWriteRequestCount());
}
}
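The per-region counters above are registered under dynamically assembled keys rather than fixed names. A small sketch of the key scheme, using made-up table and region names:
// For table "t1" and region "abc123", the put counter's registry key is
// the per-region prefix plus PUT_KEY plus the "Count" suffix.
String prefix = "table.t1.region.abc123.";
String putKey = prefix + MetricsRegionServerSource.PUT_KEY + "Count";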

View File

@ -16,22 +16,22 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.replication.regionserver.metrics;
package org.apache.hadoop.hbase.replication.regionserver;
import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
/**
* Hadoop1 implementation of ReplicationMetricsSource. This provides access to metrics gauges and
* Hadoop1 implementation of MetricsReplicationSource. This provides access to metrics gauges and
* counters.
*/
public class ReplicationMetricsSourceImpl extends BaseMetricsSourceImpl implements
ReplicationMetricsSource {
public class MetricsReplicationSourceImpl extends BaseSourceImpl implements
MetricsReplicationSource {
public ReplicationMetricsSourceImpl() {
public MetricsReplicationSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
}
ReplicationMetricsSourceImpl(String metricsName,
MetricsReplicationSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {

View File

@ -16,16 +16,16 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.rest.metrics;
package org.apache.hadoop.hbase.rest;
import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
/**
 * Hadoop One implementation of a metrics2 source that will export metrics from the REST server to
* the hadoop metrics2 subsystem.
*/
public class RESTMetricsSourceImpl extends BaseMetricsSourceImpl implements RESTMetricsSource {
public class MetricsRESTSourceImpl extends BaseSourceImpl implements MetricsRESTSource {
private MetricMutableCounterLong request;
private MetricMutableCounterLong sucGet;
@ -35,11 +35,11 @@ public class RESTMetricsSourceImpl extends BaseMetricsSourceImpl implements REST
private MetricMutableCounterLong fPut;
private MetricMutableCounterLong fDel;
public RESTMetricsSourceImpl() {
public MetricsRESTSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, CONTEXT, JMX_CONTEXT);
}
public RESTMetricsSourceImpl(String metricsName,
public MetricsRESTSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {

View File

@ -16,13 +16,13 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.thrift.metrics;
package org.apache.hadoop.hbase.thrift;
/**
* Class used to create metrics sources for Thrift and Thrift2 servers in hadoop 1's compat
* library.
*/
public class ThriftServerMetricsSourceFactoryImpl implements ThriftServerMetricsSourceFactory {
public class MetricsThriftServerSourceFactoryImpl implements MetricsThriftServerSourceFactory {
/**
* A singleton used to make sure that only one thrift metrics source per server type is ever
@ -30,23 +30,23 @@ public class ThriftServerMetricsSourceFactoryImpl implements ThriftServerMetrics
*/
private static enum FactoryStorage {
INSTANCE;
ThriftServerMetricsSourceImpl thriftOne = new ThriftServerMetricsSourceImpl(METRICS_NAME,
MetricsThriftServerSourceImpl thriftOne = new MetricsThriftServerSourceImpl(METRICS_NAME,
METRICS_DESCRIPTION,
THRIFT_ONE_METRICS_CONTEXT,
THRIFT_ONE_JMX_CONTEXT);
ThriftServerMetricsSourceImpl thriftTwo = new ThriftServerMetricsSourceImpl(METRICS_NAME,
MetricsThriftServerSourceImpl thriftTwo = new MetricsThriftServerSourceImpl(METRICS_NAME,
METRICS_DESCRIPTION,
THRIFT_TWO_METRICS_CONTEXT,
THRIFT_TWO_JMX_CONTEXT);
}
@Override
public ThriftServerMetricsSource createThriftOneSource() {
public MetricsThriftServerSource createThriftOneSource() {
return FactoryStorage.INSTANCE.thriftOne;
}
@Override
public ThriftServerMetricsSource createThriftTwoSource() {
public MetricsThriftServerSource createThriftTwoSource() {
return FactoryStorage.INSTANCE.thriftTwo;
}
}

View File

@ -16,18 +16,17 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.thrift.metrics;
package org.apache.hadoop.hbase.thrift;
import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
import org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSource;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong;
import org.apache.hadoop.metrics2.lib.MetricMutableStat;
/**
* Hadoop 1 version of ThriftServerMetricsSource{@link ThriftServerMetricsSource}
 * Hadoop 1 version of {@link MetricsThriftServerSource}
*/
public class ThriftServerMetricsSourceImpl extends BaseMetricsSourceImpl implements
ThriftServerMetricsSource {
public class MetricsThriftServerSourceImpl extends BaseSourceImpl implements
MetricsThriftServerSource {
private MetricMutableStat batchGetStat;
@ -39,7 +38,7 @@ public class ThriftServerMetricsSourceImpl extends BaseMetricsSourceImpl impleme
private MetricMutableGaugeLong callQueueLenGauge;
public ThriftServerMetricsSourceImpl(String metricsName,
public MetricsThriftServerSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {

View File

@ -0,0 +1,52 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.impl;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
/**
* JMX caches the beans that have been exported; even after the values are removed from hadoop's
* metrics system the keys and old values will still remain. This class stops and restarts the
* Hadoop metrics system, forcing JMX to clear the cache of exported metrics.
*
 * This class needs to be in the o.a.h.metrics2.impl namespace because many of the variables/calls used
* are package private.
*/
public class JmxCacheBuster {
private static final Log LOG = LogFactory.getLog(JmxCacheBuster.class);
/**
* For JMX to forget about all previously exported metrics.
*/
public static void clearJmxCache() {
LOG.trace("Clearing JMX mbean cache.");
// This is pretty extreme but it's the best way that
// I could find to get metrics to be removed.
try {
DefaultMetricsSystem.INSTANCE.stop();
DefaultMetricsSystem.INSTANCE.start();
} catch (Exception exception) {
LOG.debug("Error clearing JMX cache; the metrics system may not have been started yet.",
    exception);
}
}
}
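A hedged usage sketch: callers that drop metrics from a registry should bust the JMX cache afterwards, mirroring what MetricsRegionSourceImpl.close() does above.
// After removing a metric, clear the JMX cache so the stale MBean
// attribute disappears instead of lingering with its last value.
registry.removeMetric(regionPutKey);
JmxCacheBuster.clearJmxCache();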

View File

@ -23,6 +23,8 @@ import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsTag;
@ -39,6 +41,8 @@ import org.apache.hadoop.metrics2.MetricsTag;
*/
public class DynamicMetricsRegistry {
private final Log LOG = LogFactory.getLog(this.getClass());
/** key for the context tag */
public static final String CONTEXT_KEY = "context";
/** description for the context tag */
@ -284,6 +288,7 @@ public class DynamicMetricsRegistry {
* @param all get all the metrics even if the values are not changed.
*/
public void snapshot(MetricsRecordBuilder builder, boolean all) {
for (Entry<String, MetricsTag> entry : tags()) {
builder.add(entry.getValue());
}

View File

@ -21,9 +21,8 @@ package org.apache.hadoop.metrics2.lib;
import com.yammer.metrics.stats.ExponentiallyDecayingSample;
import com.yammer.metrics.stats.Sample;
import com.yammer.metrics.stats.Snapshot;
import org.apache.hadoop.metrics.MetricHistogram;
import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.MetricMutable;
import java.util.concurrent.atomic.AtomicLong;

View File

@ -20,8 +20,8 @@ package org.apache.hadoop.metrics2.lib;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics.MetricHistogram;
import org.apache.hadoop.metrics.MetricsExecutor;
import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.MetricsExecutor;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.util.MetricQuantile;
import org.apache.hadoop.metrics2.util.MetricSampleQuantiles;

View File

@ -18,7 +18,7 @@
package org.apache.hadoop.metrics2.lib;
import org.apache.hadoop.metrics.MetricsExecutor;
import org.apache.hadoop.metrics2.MetricsExecutor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;

View File

@ -0,0 +1 @@
org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactoryImpl

View File

@ -0,0 +1 @@
org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl

View File

@ -0,0 +1 @@
org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSourceImpl

View File

@ -0,0 +1 @@
org.apache.hadoop.hbase.rest.MetricsRESTSourceImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.rest.metrics.RESTMetricsSourceImpl

View File

@ -0,0 +1 @@
org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactoryImpl

View File

@ -0,0 +1 @@
org.apache.hadoop.metrics2.lib.MetricsExecutorImpl
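Each of these one-line files lives under META-INF/services and names a provider class, which lets the matching compat implementation be discovered at runtime through java.util.ServiceLoader. A minimal sketch of that lookup (the real code path goes through CompatibilitySingletonFactory, which caches the result):
// Discover the MetricsRESTSource implementation named in the service file.
ServiceLoader<MetricsRESTSource> loader = ServiceLoader.load(MetricsRESTSource.class);
MetricsRESTSource restSource = loader.iterator().next();  // MetricsRESTSourceImpl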

View File

@ -16,26 +16,29 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.metrics;
package org.apache.hadoop.hbase.master;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.master.MetricsMasterSource;
import org.apache.hadoop.hbase.master.MetricsMasterSourceFactory;
import org.apache.hadoop.hbase.master.MetricsMasterSourceImpl;
import org.junit.Test;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
/**
* Test for MasterMetricsSourceImpl
* Test for MetricsMasterSourceImpl
*/
public class TestMasterMetricsSourceImpl {
public class TestMetricsMasterSourceImpl {
@Test
public void testGetInstance() throws Exception {
MasterMetricsSourceFactory masterMetricsSourceFactory = CompatibilitySingletonFactory
.getInstance(MasterMetricsSourceFactory.class);
MasterMetricsSource masterMetricsSource = masterMetricsSourceFactory.create(null);
assertTrue(masterMetricsSource instanceof MasterMetricsSourceImpl);
assertSame(masterMetricsSourceFactory, CompatibilitySingletonFactory.getInstance(MasterMetricsSourceFactory.class));
MetricsMasterSourceFactory metricsMasterSourceFactory = CompatibilitySingletonFactory
.getInstance(MetricsMasterSourceFactory.class);
MetricsMasterSource masterSource = metricsMasterSourceFactory.create(null);
assertTrue(masterSource instanceof MetricsMasterSourceImpl);
assertSame(metricsMasterSourceFactory, CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class));
}
}

View File

@ -28,15 +28,15 @@ import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
/**
* Test of the default BaseMetricsSource implementation for hadoop 1
* Test of the default BaseSource implementation for hadoop 1
*/
public class TestBaseMetricsSourceImplTest {
public class TestBaseSourceImpl {
private static BaseMetricsSourceImpl bmsi;
private static BaseSourceImpl bmsi;
@BeforeClass
public static void setUp() throws Exception {
bmsi = new BaseMetricsSourceImpl("TestName", "test description", "testcontext", "TestContext");
bmsi = new BaseSourceImpl("TestName", "test description", "testcontext", "TestContext");
}
@Test
@ -81,17 +81,11 @@ public class TestBaseMetricsSourceImplTest {
}
@Test
public void testRemoveGauge() throws Exception {
public void testRemoveMetric() throws Exception {
bmsi.setGauge("testrm", 100);
bmsi.removeGauge("testrm");
bmsi.removeMetric("testrm");
assertNull(bmsi.metricsRegistry.get("testrm"));
}
@Test
public void testRemoveCounter() throws Exception {
bmsi.incCounters("testrm", 100);
bmsi.removeCounter("testrm");
assertNull(bmsi.metricsRegistry.get("testrm"));
}
}

View File

@ -0,0 +1,51 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.junit.Test;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
/**
* Test for MetricsRegionServerSourceImpl
*/
public class TestMetricsRegionServerSourceImpl {
@Test
public void testGetInstance() throws Exception {
MetricsRegionServerSourceFactory metricsRegionServerSourceFactory =
CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
MetricsRegionServerSource serverSource =
metricsRegionServerSourceFactory.createServer(null);
assertTrue(serverSource instanceof MetricsRegionServerSourceImpl);
assertSame(metricsRegionServerSourceFactory,
CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class));
}
@Test(expected = RuntimeException.class)
public void testNoGetRegionServerMetricsSourceImpl() throws Exception {
// This should throw an exception because MetricsRegionServerSourceImpl should only
// be created by a factory.
CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceImpl.class);
}
}

View File

@ -0,0 +1,101 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TestMetricsRegionSourceImpl {
@Test
public void testCompareTo() throws Exception {
MetricsRegionServerSourceFactory fact = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
MetricsRegionSource one = fact.createRegion(new RegionWrapperStub("TEST"));
MetricsRegionSource oneClone = fact.createRegion(new RegionWrapperStub("TEST"));
MetricsRegionSource two = fact.createRegion(new RegionWrapperStub("TWO"));
assertEquals(0, one.compareTo(oneClone));
assertTrue( one.compareTo(two) < 0);
assertTrue( two.compareTo(one) > 0);
}
@Test(expected = RuntimeException.class)
public void testNoGetRegionSourceImpl() throws Exception {
// This should throw an exception because MetricsRegionSourceImpl should only
// be created by a factory.
CompatibilitySingletonFactory.getInstance(MetricsRegionSource.class);
}
class RegionWrapperStub implements MetricsRegionWrapper {
private String regionName;
public RegionWrapperStub(String regionName) {
this.regionName = regionName;
}
@Override
public String getTableName() {
return null;  // not needed for these compareTo tests
}
@Override
public String getRegionName() {
return this.regionName;
}
@Override
public long getNumStores() {
return 0;  // stub value; unused by these tests
}
@Override
public long getNumStoreFiles() {
return 0;  // stub value; unused by these tests
}
@Override
public long getMemstoreSize() {
return 0;  // stub value; unused by these tests
}
@Override
public long getStoreFileSize() {
return 0;  // stub value; unused by these tests
}
@Override
public long getReadRequestCount() {
return 0;  // stub value; unused by these tests
}
@Override
public long getWriteRequestCount() {
return 0;  // stub value; unused by these tests
}
}
}

View File

@ -16,22 +16,24 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.replication.regionserver.metrics;
package org.apache.hadoop.hbase.replication.regionserver;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource;
import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
/**
* Test to make sure that ReplicationMetricsSourceImpl is hooked up to ServiceLoader
* Test to make sure that MetricsReplicationSourceImpl is hooked up to ServiceLoader
*/
public class TestReplicationMetricsSourceImpl {
@Test
public void testGetInstance() throws Exception {
ReplicationMetricsSource rms = CompatibilitySingletonFactory
.getInstance(ReplicationMetricsSource.class);
assertTrue(rms instanceof ReplicationMetricsSourceImpl);
MetricsReplicationSource rms = CompatibilitySingletonFactory
.getInstance(MetricsReplicationSource.class);
assertTrue(rms instanceof MetricsReplicationSourceImpl);
}
}

View File

@ -16,23 +16,25 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.rest.metrics;
package org.apache.hadoop.hbase.rest;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.rest.MetricsRESTSource;
import org.apache.hadoop.hbase.rest.MetricsRESTSourceImpl;
import org.junit.Test;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
/**
* Test for hadoop1's version of RESTMetricsSource
* Test for hadoop1's version of MetricsRESTSource
*/
public class TestRESTMetricsSourceImpl {
@Test
public void ensureCompatRegistered() throws Exception {
assertNotNull(CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class));
assertTrue(CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class) instanceof RESTMetricsSourceImpl);
assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class));
assertTrue(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class) instanceof MetricsRESTSourceImpl);
}
}

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.hbase.test;
import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
import org.apache.hadoop.hbase.metrics.BaseSource;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.Metric;
import org.apache.hadoop.metrics2.MetricsBuilder;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
@ -110,68 +110,68 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
}
@Override
public void assertTag(String name, String expected, BaseMetricsSource source) {
public void assertTag(String name, String expected, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertEquals("Tags should be equal", expected, tags.get(cName));
}
@Override
public void assertGauge(String name, long expected, BaseMetricsSource source) {
public void assertGauge(String name, long expected, BaseSource source) {
long found = getGaugeLong(name, source);
assertEquals("Metrics Should be equal", (long) Long.valueOf(expected), found);
}
@Override
public void assertGaugeGt(String name, long expected, BaseMetricsSource source) {
public void assertGaugeGt(String name, long expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + " (" + found + ") should be greater than " + expected, found > expected);
}
@Override
public void assertGaugeLt(String name, long expected, BaseMetricsSource source) {
public void assertGaugeLt(String name, long expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
public void assertGauge(String name, double expected, BaseMetricsSource source) {
public void assertGauge(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertEquals("Metrics Should be equal", (double) Double.valueOf(expected), found);
assertEquals("Metrics Should be equal", (double) Double.valueOf(expected), found, 0.01);
}
@Override
public void assertGaugeGt(String name, double expected, BaseMetricsSource source) {
public void assertGaugeGt(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be greater than " + expected, found > expected);
}
@Override
public void assertGaugeLt(String name, double expected, BaseMetricsSource source) {
public void assertGaugeLt(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
public void assertCounter(String name, long expected, BaseMetricsSource source) {
public void assertCounter(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertEquals("Metrics Counters should be equal", (long) Long.valueOf(expected), found);
}
@Override
public void assertCounterGt(String name, long expected, BaseMetricsSource source) {
public void assertCounterGt(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertTrue(name + " (" + found + ") should be greater than " + expected, found > expected);
}
@Override
public void assertCounterLt(String name, long expected, BaseMetricsSource source) {
public void assertCounterLt(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
public long getCounter(String name, BaseMetricsSource source) {
public long getCounter(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull(counters.get(cName));
@ -179,7 +179,7 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
}
@Override
public double getGaugeDouble(String name, BaseMetricsSource source) {
public double getGaugeDouble(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull(gauges.get(cName));
@ -187,7 +187,7 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
}
@Override
public long getGaugeLong(String name, BaseMetricsSource source) {
public long getGaugeLong(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull(gauges.get(cName));
@ -200,12 +200,12 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
counters.clear();
}
private void getMetrics(BaseMetricsSource source) {
private void getMetrics(BaseSource source) {
reset();
if (!(source instanceof BaseMetricsSourceImpl)) {
if (!(source instanceof BaseSourceImpl)) {
assertTrue(false);
}
BaseMetricsSourceImpl impl = (BaseMetricsSourceImpl) source;
BaseSourceImpl impl = (BaseSourceImpl) source;
impl.getMetrics(new MockMetricsBuilder(), true);

View File

@ -16,9 +16,11 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.thrift.metrics;
package org.apache.hadoop.hbase.thrift;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory;
import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl;
import org.junit.Test;
import static org.junit.Assert.assertNotNull;
@ -26,28 +28,28 @@ import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
/**
* Test the hadoop 1 version of ThriftServerMetricsSourceFactory
* Test the hadoop 1 version of MetricsThriftServerSourceFactory
*/
public class TestThriftServerMetricsSourceFactoryImpl {
@Test
public void testCompatabilityRegistered() throws Exception {
assertNotNull(CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class));
assertTrue(CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class) instanceof ThriftServerMetricsSourceFactoryImpl);
assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class));
assertTrue(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class) instanceof MetricsThriftServerSourceFactoryImpl);
}
@Test
public void testCreateThriftOneSource() throws Exception {
//Make sure that the factory gives back a singleton.
assertSame(new ThriftServerMetricsSourceFactoryImpl().createThriftOneSource(),
new ThriftServerMetricsSourceFactoryImpl().createThriftOneSource());
assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftOneSource(),
new MetricsThriftServerSourceFactoryImpl().createThriftOneSource());
}
@Test
public void testCreateThriftTwoSource() throws Exception {
//Make sure that the factory gives back a singleton.
assertSame(new ThriftServerMetricsSourceFactoryImpl().createThriftTwoSource(),
new ThriftServerMetricsSourceFactoryImpl().createThriftTwoSource());
assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource(),
new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource());
}
}

View File

@ -138,6 +138,10 @@ limitations under the License.
<groupId>com.yammer.metrics</groupId>
<artifactId>metrics-core</artifactId>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</dependency>
<!-- This was marked as test dep in earlier pom, but was scoped compile. Where
do we actually need it? -->
<dependency>

View File

@ -16,22 +16,22 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.metrics;
package org.apache.hadoop.hbase.master;
/**
* Factory to create MasterMetricsSource when given a MasterMetricsWrapper
* Factory to create MetricsMasterSource when given a MetricsMasterWrapper
*/
public class MasterMetricsSourceFactoryImpl implements MasterMetricsSourceFactory {
public class MetricsMasterSourceFactoryImpl implements MetricsMasterSourceFactory {
private static enum FactoryStorage {
INSTANCE;
MasterMetricsSource source;
MetricsMasterSource masterSource;
}
@Override
public synchronized MasterMetricsSource create(MasterMetricsWrapper beanWrapper) {
if (FactoryStorage.INSTANCE.source == null ) {
FactoryStorage.INSTANCE.source = new MasterMetricsSourceImpl(beanWrapper);
public synchronized MetricsMasterSource create(MetricsMasterWrapper masterWrapper) {
if (FactoryStorage.INSTANCE.masterSource == null) {
FactoryStorage.INSTANCE.masterSource = new MetricsMasterSourceImpl(masterWrapper);
}
return FactoryStorage.INSTANCE.source;
return FactoryStorage.INSTANCE.masterSource;
}
}
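Because FactoryStorage is an enum holder, create() hands back one MetricsMasterSource per process no matter how often it is called. A sketch, assuming masterWrapper is any MetricsMasterWrapper implementation:
MetricsMasterSourceFactory factory = CompatibilitySingletonFactory
    .getInstance(MetricsMasterSourceFactory.class);
MetricsMasterSource a = factory.create(masterWrapper);
MetricsMasterSource b = factory.create(masterWrapper);
assert a == b;  // same singleton instance both times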

View File

@ -16,9 +16,9 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.metrics;
package org.apache.hadoop.hbase.master;
import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.Interns;
@ -26,32 +26,33 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
import org.apache.hadoop.metrics2.lib.MutableHistogram;
/** Hadoop2 implementation of MasterMetricsSource. */
public class MasterMetricsSourceImpl
extends BaseMetricsSourceImpl implements MasterMetricsSource {
/**
* Hadoop2 implementation of MetricsMasterSource.
*/
public class MetricsMasterSourceImpl
extends BaseSourceImpl implements MetricsMasterSource {
MutableCounterLong clusterRequestsCounter;
MutableGaugeLong ritGauge;
MutableGaugeLong ritCountOverThresholdGauge;
MutableGaugeLong ritOldestAgeGauge;
private final MasterMetricsWrapper masterWrapper;
private final MetricsMasterWrapper masterWrapper;
private MutableCounterLong clusterRequestsCounter;
private MutableGaugeLong ritGauge;
private MutableGaugeLong ritCountOverThresholdGauge;
private MutableGaugeLong ritOldestAgeGauge;
private MutableHistogram splitTimeHisto;
private MutableHistogram splitSizeHisto;
public MasterMetricsSourceImpl(MasterMetricsWrapper masterMetricsWrapper) {
public MetricsMasterSourceImpl(MetricsMasterWrapper masterWrapper) {
this(METRICS_NAME,
METRICS_DESCRIPTION,
METRICS_CONTEXT,
METRICS_JMX_CONTEXT,
masterMetricsWrapper);
masterWrapper);
}
public MasterMetricsSourceImpl(String metricsName,
public MetricsMasterSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext,
MasterMetricsWrapper masterWrapper) {
MetricsMasterWrapper masterWrapper) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
this.masterWrapper = masterWrapper;
@ -104,9 +105,9 @@ public class MasterMetricsSourceImpl
if (masterWrapper != null) {
metricsRecordBuilder
.addGauge(Interns.info(MASTER_ACTIVE_TIME_NAME,
MASTER_ACTIVE_TIME_DESC), masterWrapper.getMasterActiveTime())
MASTER_ACTIVE_TIME_DESC), masterWrapper.getActiveTime())
.addGauge(Interns.info(MASTER_START_TIME_NAME,
MASTER_START_TIME_DESC), masterWrapper.getMasterStartTime())
MASTER_START_TIME_DESC), masterWrapper.getStartTime())
.addGauge(Interns.info(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC),
masterWrapper.getAverageLoad())
.addGauge(Interns.info(NUM_REGION_SERVERS_NAME,
@ -123,7 +124,7 @@ public class MasterMetricsSourceImpl
String.valueOf(masterWrapper.getIsActiveMaster()));
}
metricsRegistry.snapshot(metricsRecordBuilder, true);
metricsRegistry.snapshot(metricsRecordBuilder, all);
}
}

View File

@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.metrics;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.impl.JmxCacheBuster;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
import org.apache.hadoop.metrics2.lib.MetricMutableQuantiles;
@ -29,9 +30,9 @@ import org.apache.hadoop.metrics2.lib.MutableHistogram;
import org.apache.hadoop.metrics2.source.JvmMetrics;
/**
* Hadoop 2 implementation of BaseMetricsSource (using metrics2 framework)
* Hadoop 2 implementation of BaseSource (using metrics2 framework)
*/
public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource {
public class BaseSourceImpl implements BaseSource, MetricsSource {
private static enum DefaultMetricsSystemInitializer {
INSTANCE;
@ -47,15 +48,13 @@ public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource {
}
}
public static final String HBASE_METRICS_SYSTEM_NAME = "hbase";
protected final DynamicMetricsRegistry metricsRegistry;
protected final String metricsName;
protected final String metricsDescription;
protected final String metricsContext;
protected final String metricsJmxContext;
public BaseMetricsSourceImpl(
public BaseSourceImpl(
String metricsName,
String metricsDescription,
String metricsContext,
@ -141,20 +140,12 @@ public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource {
*
* @param key
*/
public void removeGauge(String key) {
public void removeMetric(String key) {
metricsRegistry.removeMetric(key);
JmxCacheBuster.clearJmxCache();
}
/**
* Remove a named counter.
*
* @param key
*/
public void removeCounter(String key) {
metricsRegistry.removeMetric(key);
}
protected DynamicMetricsRegistry getMetricsRegistry() {
public DynamicMetricsRegistry getMetricsRegistry() {
return metricsRegistry;
}

View File

@ -0,0 +1,82 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import java.util.TreeSet;
public class MetricsRegionAggregateSourceImpl extends BaseSourceImpl
implements MetricsRegionAggregateSource {
private final Log LOG = LogFactory.getLog(this.getClass());
private final TreeSet<MetricsRegionSourceImpl> regionSources =
new TreeSet<MetricsRegionSourceImpl>();
public MetricsRegionAggregateSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
}
public MetricsRegionAggregateSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
}
@Override
public void register(MetricsRegionSource source) {
regionSources.add((MetricsRegionSourceImpl) source);
}
@Override
public void deregister(MetricsRegionSource source) {
regionSources.remove(source);
}
/**
 * Note that this is a get method that does not return anything. The Hadoop metrics2
 * framework expects getMetrics to push its metrics into the supplied collector
 * rather than return them.
 *
 * @param collector the collector to push metrics into
 * @param all       get all the metrics regardless of when they last changed
 */
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
MetricsRecordBuilder mrb = collector.addRecord(metricsName)
.setContext(metricsContext);
if (regionSources != null) {
for (MetricsRegionSourceImpl regionMetricSource : regionSources) {
regionMetricSource.snapshot(mrb, all);
}
}
metricsRegistry.snapshot(mrb, all);
}
}
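To make the aggregation flow concrete, a sketch of the lifecycle, assuming wrapper is any MetricsRegionWrapper implementation: region sources register themselves on construction, each metrics-system poll of the aggregate pulls one snapshot per live region, and close() stops a region from contributing.
MetricsRegionAggregateSourceImpl agg = new MetricsRegionAggregateSourceImpl();
MetricsRegionSourceImpl region = new MetricsRegionSourceImpl(wrapper, agg);  // registers itself
// ... the metrics system later calls agg.getMetrics(collector, true), which in
// turn calls region.snapshot(mrb, true) for every registered region source.
region.close();  // deregisters; future snapshots skip this region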

View File

@ -0,0 +1,52 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
/**
* Factory to create MetricsRegionServerSource when given a MetricsRegionServerWrapper
*/
public class MetricsRegionServerSourceFactoryImpl implements MetricsRegionServerSourceFactory {
private static enum FactoryStorage {
INSTANCE;
private MetricsRegionServerSource serverSource;
private MetricsRegionAggregateSourceImpl aggImpl;
}
private synchronized MetricsRegionAggregateSourceImpl getAggregate() {
if (FactoryStorage.INSTANCE.aggImpl == null) {
FactoryStorage.INSTANCE.aggImpl = new MetricsRegionAggregateSourceImpl();
}
return FactoryStorage.INSTANCE.aggImpl;
}
@Override
public synchronized MetricsRegionServerSource createServer(MetricsRegionServerWrapper regionServerWrapper) {
if (FactoryStorage.INSTANCE.serverSource == null) {
FactoryStorage.INSTANCE.serverSource = new MetricsRegionServerSourceImpl(
regionServerWrapper);
}
return FactoryStorage.INSTANCE.serverSource;
}
@Override
public MetricsRegionSource createRegion(MetricsRegionWrapper wrapper) {
return new MetricsRegionSourceImpl(wrapper, getAggregate());
}
}
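The two create methods behave differently on purpose: createServer caches a single server-wide source, while createRegion builds a fresh source per call, all sharing one aggregate. A sketch, with rsWrap and wrapper standing in for wrapper implementations:
MetricsRegionServerSourceFactory f = CompatibilitySingletonFactory
    .getInstance(MetricsRegionServerSourceFactory.class);
assert f.createServer(rsWrap) == f.createServer(rsWrap);   // singleton server source
assert f.createRegion(wrapper) != f.createRegion(wrapper); // new source per region, shared aggregate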

View File

@ -0,0 +1,164 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.Interns;
/**
 * Hadoop2 implementation of MetricsRegionServerSource.
*/
public class MetricsRegionServerSourceImpl
extends BaseSourceImpl implements MetricsRegionServerSource {
final MetricsRegionServerWrapper rsWrap;
private final MetricHistogram putHisto;
private final MetricHistogram deleteHisto;
private final MetricHistogram getHisto;
private final MetricHistogram incrementHisto;
private final MetricHistogram appendHisto;
public MetricsRegionServerSourceImpl(MetricsRegionServerWrapper rsWrap) {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, rsWrap);
}
public MetricsRegionServerSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext,
MetricsRegionServerWrapper rsWrap) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
this.rsWrap = rsWrap;
putHisto = getMetricsRegistry().getHistogram(PUT_KEY);
deleteHisto = getMetricsRegistry().getHistogram(DELETE_KEY);
getHisto = getMetricsRegistry().getHistogram(GET_KEY);
incrementHisto = getMetricsRegistry().getHistogram(INCREMENT_KEY);
appendHisto = getMetricsRegistry().getHistogram(APPEND_KEY);
}
@Override
public void init() {
super.init();
}
@Override
public void updatePut(long t) {
putHisto.add(t);
}
@Override
public void updateDelete(long t) {
deleteHisto.add(t);
}
@Override
public void updateGet(long t) {
getHisto.add(t);
}
@Override
public void updateIncrement(long t) {
incrementHisto.add(t);
}
@Override
public void updateAppend(long t) {
appendHisto.add(t);
}
/**
 * Note that this is a get method that does not return anything. The Hadoop metrics2
 * framework expects getMetrics to push its metrics into the supplied collector
 * rather than return them.
 *
 * @param metricsCollector Collector to accept metrics
 * @param all              push all metrics or only the changed ones?
 */
@Override
public void getMetrics(MetricsCollector metricsCollector, boolean all) {
MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName)
.setContext(metricsContext);
// rsWrap can be null because this function is called inside of init.
if (rsWrap != null) {
mrb.addGauge(Interns.info(REGION_COUNT, REGION_COUNT_DESC), rsWrap.getNumOnlineRegions())
.addGauge(Interns.info(STORE_COUNT, STORE_COUNT_DESC), rsWrap.getNumStores())
.addGauge(Interns.info(STOREFILE_COUNT, STOREFILE_COUNT_DESC), rsWrap.getNumStoreFiles())
.addGauge(Interns.info(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC), rsWrap.getMemstoreSize())
.addGauge(Interns.info(STOREFILE_SIZE, STOREFILE_SIZE_DESC), rsWrap.getStoreFileSize())
.addGauge(Interns.info(RS_START_TIME_NAME, RS_START_TIME_DESC),
rsWrap.getStartCode())
.addCounter(Interns.info(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC),
rsWrap.getTotalRequestCount())
.addCounter(Interns.info(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC),
rsWrap.getReadRequestsCount())
.addCounter(Interns.info(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC),
rsWrap.getWriteRequestsCount())
.addCounter(Interns.info(CHECK_MUTATE_FAILED_COUNT, CHECK_MUTATE_FAILED_COUNT_DESC),
rsWrap.getCheckAndMutateChecksFailed())
.addCounter(Interns.info(CHECK_MUTATE_PASSED_COUNT, CHECK_MUTATE_PASSED_COUNT_DESC),
rsWrap.getCheckAndMutateChecksPassed())
.addGauge(Interns.info(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC),
rsWrap.getStoreFileIndexSize())
.addGauge(Interns.info(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC),
rsWrap.getTotalStaticIndexSize())
.addGauge(Interns.info(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC),
rsWrap.getTotalStaticBloomSize())
.addGauge(Interns.info(NUMBER_OF_PUTS_WITHOUT_WAL, NUMBER_OF_PUTS_WITHOUT_WAL_DESC),
rsWrap.getNumPutsWithoutWAL())
.addGauge(Interns.info(DATA_SIZE_WITHOUT_WAL, DATA_SIZE_WITHOUT_WAL_DESC),
rsWrap.getDataInMemoryWithoutWAL())
.addGauge(Interns.info(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC),
rsWrap.getPercentFileLocal())
.addGauge(Interns.info(COMPACTION_QUEUE_LENGTH, COMPACTION_QUEUE_LENGTH_DESC),
rsWrap.getCompactionQueueSize())
.addGauge(Interns.info(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC),
rsWrap.getFlushQueueSize())
.addGauge(Interns.info(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC),
rsWrap.getBlockCacheFreeSize())
.addGauge(Interns.info(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC),
rsWrap.getBlockCacheCount())
.addGauge(Interns.info(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC),
rsWrap.getBlockCacheSize())
.addCounter(Interns.info(BLOCK_CACHE_HIT_COUNT, BLOCK_CACHE_HIT_COUNT_DESC),
rsWrap.getBlockCacheHitCount())
.addCounter(Interns.info(BLOCK_CACHE_MISS_COUNT, BLOCK_COUNT_MISS_COUNT_DESC),
rsWrap.getBlockCacheMissCount())
.addCounter(Interns.info(BLOCK_CACHE_EVICTION_COUNT, BLOCK_CACHE_EVICTION_COUNT_DESC),
rsWrap.getBlockCacheEvictedCount())
.addGauge(Interns.info(BLOCK_CACHE_HIT_PERCENT, BLOCK_CACHE_HIT_PERCENT_DESC),
rsWrap.getBlockCacheHitPercent())
.addGauge(Interns.info(BLOCK_CACHE_EXPRESS_HIT_PERCENT,
BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC), rsWrap.getBlockCacheHitCachingPercent())
.addCounter(Interns.info(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC),
rsWrap.getUpdatesBlockedTime())
.tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC),
rsWrap.getZookeeperQuorum())
.tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), rsWrap.getServerName())
.tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), rsWrap.getClusterId());
}
metricsRegistry.snapshot(mrb, all);
}
}
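Unlike the hadoop1 variant earlier in this patch, every gauge and counter here wraps its name and description in Interns.info, which caches MetricsInfo instances so the metadata built on each poll is reused rather than reallocated. A sketch, assuming Hadoop's intern cache behaves as documented:
MetricsInfo a = Interns.info(REGION_COUNT, REGION_COUNT_DESC);
MetricsInfo b = Interns.info(REGION_COUNT, REGION_COUNT_DESC);
assert a == b;  // same cached instance for the same (name, description) pair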

View File

@ -0,0 +1,158 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.impl.JmxCacheBuster;
import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
import org.apache.hadoop.metrics2.lib.Interns;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
public class MetricsRegionSourceImpl implements MetricsRegionSource {
private final MetricsRegionWrapper regionWrapper;
private boolean closed = false;
private MetricsRegionAggregateSourceImpl agg;
private DynamicMetricsRegistry registry;
private static final Log LOG = LogFactory.getLog(MetricsRegionSourceImpl.class);
private String regionNamePrefix;
private String regionPutKey;
private String regionDeleteKey;
private String regionGetKey;
private String regionIncrementKey;
private String regionAppendKey;
private MutableCounterLong regionPut;
private MutableCounterLong regionDelete;
private MutableCounterLong regionGet;
private MutableCounterLong regionIncrement;
private MutableCounterLong regionAppend;
public MetricsRegionSourceImpl(MetricsRegionWrapper regionWrapper,
MetricsRegionAggregateSourceImpl aggregate) {
this.regionWrapper = regionWrapper;
agg = aggregate;
agg.register(this);
LOG.debug("Creating new MetricsRegionSourceImpl for table " +
regionWrapper.getTableName() +
" " +
regionWrapper.getRegionName());
registry = agg.getMetricsRegistry();
regionNamePrefix = "table." + regionWrapper.getTableName() + "."
+ "region." + regionWrapper.getRegionName() + ".";
String suffix = "Count";
regionPutKey = regionNamePrefix + MetricsRegionServerSource.PUT_KEY + suffix;
regionPut = registry.getLongCounter(regionPutKey, 0L);
regionDeleteKey = regionNamePrefix + MetricsRegionServerSource.DELETE_KEY + suffix;
regionDelete = registry.getLongCounter(regionDeleteKey, 0L);
regionGetKey = regionNamePrefix + MetricsRegionServerSource.GET_KEY + suffix;
regionGet = registry.getLongCounter(regionGetKey, 0L);
regionIncrementKey = regionNamePrefix + MetricsRegionServerSource.INCREMENT_KEY + suffix;
regionIncrement = registry.getLongCounter(regionIncrementKey, 0L);
regionAppendKey = regionNamePrefix + MetricsRegionServerSource.APPEND_KEY + suffix;
regionAppend = registry.getLongCounter(regionAppendKey, 0L);
}
@Override
public void close() {
closed = true;
agg.deregister(this);
LOG.trace("Removing region Metrics: " + regionWrapper.getRegionName());
registry.removeMetric(regionPutKey);
registry.removeMetric(regionDeleteKey);
registry.removeMetric(regionGetKey);
registry.removeMetric(regionIncrementKey);
registry.removeMetric(regionAppendKey);
JmxCacheBuster.clearJmxCache();
}
@Override
public void updatePut() {
regionPut.incr();
}
@Override
public void updateDelete() {
regionDelete.incr();
}
@Override
public void updateGet() {
regionGet.incr();
}
@Override
public void updateIncrement() {
regionIncrement.incr();
}
@Override
public void updateAppend() {
regionAppend.incr();
}
@Override
public MetricsRegionAggregateSource getAggregateSource() {
return agg;
}
@Override
public int compareTo(MetricsRegionSource source) {
if (!(source instanceof MetricsRegionSourceImpl))
return -1;
MetricsRegionSourceImpl impl = (MetricsRegionSourceImpl) source;
return this.regionWrapper.getRegionName()
.compareTo(impl.regionWrapper.getRegionName());
}
void snapshot(MetricsRecordBuilder mrb, boolean ignored) {
if (closed) return;
mrb.addGauge(
Interns.info(regionNamePrefix + MetricsRegionServerSource.STORE_COUNT,
MetricsRegionServerSource.STORE_COUNT_DESC),
this.regionWrapper.getNumStores());
mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT,
MetricsRegionServerSource.STOREFILE_COUNT_DESC),
this.regionWrapper.getNumStoreFiles());
mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE,
MetricsRegionServerSource.MEMSTORE_SIZE_DESC),
this.regionWrapper.getMemstoreSize());
mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE,
MetricsRegionServerSource.STOREFILE_SIZE_DESC),
this.regionWrapper.getStoreFileSize());
}
}

View File

@ -16,23 +16,23 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.replication.regionserver.metrics;
package org.apache.hadoop.hbase.replication.regionserver;
import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
/**
* Hadoop2 implementation of ReplicationMetricsSource. This provides access to metrics gauges and
* Hadoop2 implementation of MetricsReplicationSource. This provides access to metrics gauges and
* counters.
*/
public class ReplicationMetricsSourceImpl extends BaseMetricsSourceImpl implements
ReplicationMetricsSource {
public class MetricsReplicationSourceImpl extends BaseSourceImpl implements
MetricsReplicationSource {
public ReplicationMetricsSourceImpl() {
public MetricsReplicationSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
}
ReplicationMetricsSourceImpl(String metricsName,
MetricsReplicationSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {

View File

@ -16,16 +16,16 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.rest.metrics;
package org.apache.hadoop.hbase.rest;
import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
/**
* Hadoop 2 implementation of a metrics2 source that exports metrics from the REST server to
* the hadoop metrics2 subsystem.
*/
public class RESTMetricsSourceImpl extends BaseMetricsSourceImpl implements RESTMetricsSource {
public class MetricsRESTSourceImpl extends BaseSourceImpl implements MetricsRESTSource {
private MutableCounterLong request;
private MutableCounterLong sucGet;
@ -35,11 +35,11 @@ public class RESTMetricsSourceImpl extends BaseMetricsSourceImpl implements REST
private MutableCounterLong fPut;
private MutableCounterLong fDel;
public RESTMetricsSourceImpl() {
public MetricsRESTSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, CONTEXT, JMX_CONTEXT);
}
public RESTMetricsSourceImpl(String metricsName,
public MetricsRESTSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {

View File

@ -16,12 +16,12 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.thrift.metrics;
package org.apache.hadoop.hbase.thrift;
/**
* Class used to create metrics sources for Thrift and Thrift2 servers.
*/
public class ThriftServerMetricsSourceFactoryImpl implements ThriftServerMetricsSourceFactory {
public class MetricsThriftServerSourceFactoryImpl implements MetricsThriftServerSourceFactory {
/**
* A singleton used to make sure that only one thrift metrics source per server type is ever
@ -29,23 +29,23 @@ public class ThriftServerMetricsSourceFactoryImpl implements ThriftServerMetrics
*/
private static enum FactoryStorage {
INSTANCE;
ThriftServerMetricsSourceImpl thriftOne = new ThriftServerMetricsSourceImpl(METRICS_NAME,
MetricsThriftServerSourceImpl thriftOne = new MetricsThriftServerSourceImpl(METRICS_NAME,
METRICS_DESCRIPTION,
THRIFT_ONE_METRICS_CONTEXT,
THRIFT_ONE_JMX_CONTEXT);
ThriftServerMetricsSourceImpl thriftTwo = new ThriftServerMetricsSourceImpl(METRICS_NAME,
MetricsThriftServerSourceImpl thriftTwo = new MetricsThriftServerSourceImpl(METRICS_NAME,
METRICS_DESCRIPTION,
THRIFT_TWO_METRICS_CONTEXT,
THRIFT_TWO_JMX_CONTEXT);
}
@Override
public ThriftServerMetricsSource createThriftOneSource() {
public MetricsThriftServerSource createThriftOneSource() {
return FactoryStorage.INSTANCE.thriftOne;
}
@Override
public ThriftServerMetricsSource createThriftTwoSource() {
public MetricsThriftServerSource createThriftTwoSource() {
return FactoryStorage.INSTANCE.thriftTwo;
}
}
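The private FactoryStorage enum above is the standard Java enum-singleton idiom: the JVM's class-initialization rules guarantee the INSTANCE constant (and its two source fields) are created exactly once, lazily, and thread-safely, with no explicit locking. A self-contained sketch of the same pattern, hypothetical names throughout:

public class ResourceHolder {
  static class Resource { }  // stand-in for an expensive, share-once object

  private enum Storage {
    INSTANCE;  // instantiated once by the JVM on first use
    final Resource resource = new Resource();
  }

  public static Resource get() {
    return Storage.INSTANCE.resource;
  }
}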

View File

@ -16,18 +16,17 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.thrift.metrics;
package org.apache.hadoop.hbase.thrift;
import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
import org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSource;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
import org.apache.hadoop.metrics2.lib.MutableStat;
/**
* Hadoop 2 version of ThriftServerMetricsSource{@link ThriftServerMetricsSource}
* Hadoop 2 version of {@link org.apache.hadoop.hbase.thrift.MetricsThriftServerSource}
*/
public class ThriftServerMetricsSourceImpl extends BaseMetricsSourceImpl implements
ThriftServerMetricsSource {
public class MetricsThriftServerSourceImpl extends BaseSourceImpl implements
MetricsThriftServerSource {
private MutableStat batchGetStat;
private MutableStat batchMutateStat;
@ -38,7 +37,7 @@ public class ThriftServerMetricsSourceImpl extends BaseMetricsSourceImpl impleme
private MutableGaugeLong callQueueLenGauge;
public ThriftServerMetricsSourceImpl(String metricsName,
public MetricsThriftServerSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {

View File

@ -0,0 +1,54 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.impl;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
/**
* JMX caches the beans that have been exported; even after the values are removed from hadoop's
* metrics system the keys and old values will still remain. This class stops and restarts the
* Hadoop metrics system, forcing JMX to clear the cache of exported metrics.
*
* This class needs to be in the o.a.h.metrics2.impl namespace as many of the variables/calls used
* are package private.
*/
public class JmxCacheBuster {
private static final Log LOG = LogFactory.getLog(JmxCacheBuster.class);
/**
* For JMX to forget about all previously exported metrics.
*/
public static void clearJmxCache() {
LOG.trace("Clearing JMX mbean cache.");
// This is pretty extreme but it's the best way that
// I could find to get metrics to be removed.
try {
if (DefaultMetricsSystem.instance() != null) {
DefaultMetricsSystem.instance().stop();
DefaultMetricsSystem.instance().start();
}
} catch (Exception exception) {
LOG.debug("Error clearing the JMX cache; it appears the metrics system hasn't been started.",
exception);
}
}
}
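Because clearJmxCache() stops and restarts the whole metrics system, every exported bean disappears and is re-registered; stale entries (such as those for a closed region) are dropped in the process. A hedged way to observe the effect with the standard JMX API (the "Hadoop" domain is the usual home of metrics2 beans, but verify for your deployment):

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class JmxDump {
  public static void main(String[] args) throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    // List everything in the Hadoop domain; beans for removed metrics
    // should no longer appear after clearJmxCache() has run.
    for (ObjectName name : server.queryNames(new ObjectName("Hadoop:*"), null)) {
      System.out.println(name);
    }
  }
}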

View File

@ -528,6 +528,7 @@ public class DynamicMetricsRegistry {
return returnExistingWithCast(metric, metricClass, name);
}
@SuppressWarnings("unchecked")
private<T> T returnExistingWithCast(MutableMetric metric,
Class<T> metricClass, String name) {
if (!metricClass.isAssignableFrom(metric.getClass())) {

View File

@ -22,8 +22,8 @@ import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics.MetricHistogram;
import org.apache.hadoop.metrics.MetricsExecutor;
import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.MetricsExecutor;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.util.MetricQuantile;

View File

@ -18,7 +18,7 @@
package org.apache.hadoop.metrics2.lib;
import org.apache.hadoop.metrics.MetricsExecutor;
import org.apache.hadoop.metrics2.MetricsExecutor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;

View File

@ -22,7 +22,7 @@ import com.yammer.metrics.stats.ExponentiallyDecayingSample;
import com.yammer.metrics.stats.Sample;
import com.yammer.metrics.stats.Snapshot;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.metrics.MetricHistogram;
import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;

View File

@ -0,0 +1 @@
org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactoryImpl

View File

@ -0,0 +1 @@
org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl

View File

@ -0,0 +1 @@
org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSourceImpl

View File

@ -0,0 +1 @@
org.apache.hadoop.hbase.rest.MetricsRESTSourceImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.rest.metrics.RESTMetricsSourceImpl

View File

@ -0,0 +1 @@
org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl

View File

@ -1 +0,0 @@
org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactoryImpl

View File

@ -0,0 +1 @@
org.apache.hadoop.metrics2.lib.MetricsExecutorImpl
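The one-line resources above are standard java.util.ServiceLoader provider-configuration files: each lives under META-INF/services/, is named for an interface, and names the implementation class to load, which is presumably how CompatibilitySingletonFactory picks up whichever hadoop1 or hadoop2 implementation is on the classpath. A minimal sketch of the lookup side under that assumption:

import java.util.Iterator;
import java.util.ServiceLoader;

public class ProviderLookup {
  public static <T> T loadFirst(Class<T> klass) {
    Iterator<T> it = ServiceLoader.load(klass).iterator();
    if (!it.hasNext()) {
      throw new RuntimeException("No implementation registered for " + klass);
    }
    return it.next();  // the class named in META-INF/services/<interface FQCN>
  }
}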

View File

@ -16,26 +16,29 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.metrics;
package org.apache.hadoop.hbase.master;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.master.MetricsMasterSource;
import org.apache.hadoop.hbase.master.MetricsMasterSourceFactory;
import org.apache.hadoop.hbase.master.MetricsMasterSourceImpl;
import org.junit.Test;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
/**
* Test for MasterMetricsSourceImpl
* Test for MetricsMasterSourceImpl
*/
public class TestMasterMetricsSourceImpl {
public class TestMetricsMasterSourceImpl {
@Test
public void testGetInstance() throws Exception {
MasterMetricsSourceFactory masterMetricsSourceFactory = CompatibilitySingletonFactory
.getInstance(MasterMetricsSourceFactory.class);
MasterMetricsSource masterMetricsSource = masterMetricsSourceFactory.create(null);
assertTrue(masterMetricsSource instanceof MasterMetricsSourceImpl);
assertSame(masterMetricsSourceFactory, CompatibilitySingletonFactory.getInstance(MasterMetricsSourceFactory.class));
MetricsMasterSourceFactory metricsMasterSourceFactory = CompatibilitySingletonFactory
.getInstance(MetricsMasterSourceFactory.class);
MetricsMasterSource masterSource = metricsMasterSourceFactory.create(null);
assertTrue(masterSource instanceof MetricsMasterSourceImpl);
assertSame(metricsMasterSourceFactory, CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class));
}
}

View File

@ -27,15 +27,15 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
/**
* Test of default BaseMetricsSource for hadoop 2
* Test of default BaseSource for hadoop 2
*/
public class TestBaseMetricsSourceImpl {
public class TestBaseSourceImpl {
private static BaseMetricsSourceImpl bmsi;
private static BaseSourceImpl bmsi;
@BeforeClass
public static void setUp() throws Exception {
bmsi = new BaseMetricsSourceImpl("TestName", "test description", "testcontext", "TestContext");
bmsi = new BaseSourceImpl("TestName", "test description", "testcontext", "TestContext");
}
@Test
@ -75,16 +75,10 @@ public class TestBaseMetricsSourceImpl {
}
@Test
public void testRemoveGauge() throws Exception {
public void testRemoveMetric() throws Exception {
bmsi.setGauge("testrmgauge", 100);
bmsi.removeGauge("testrmgauge");
bmsi.removeMetric("testrmgauge");
assertNull(bmsi.metricsRegistry.get("testrmgauge"));
}
@Test
public void testRemoveCounter() throws Exception {
bmsi.incCounters("testrmcounter", 100);
bmsi.removeCounter("testrmcounter");
assertNull(bmsi.metricsRegistry.get("testrmcounter"));
}
}

View File

@ -0,0 +1,50 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.junit.Test;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
/**
* Test for MetricsRegionServerSourceImpl
*/
public class TestMetricsRegionServerSourceImpl {
@Test
public void testGetInstance() throws Exception {
MetricsRegionServerSourceFactory metricsRegionServerSourceFactory =
CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
MetricsRegionServerSource serverSource =
metricsRegionServerSourceFactory.createServer(null);
assertTrue(serverSource instanceof MetricsRegionServerSourceImpl);
assertSame(metricsRegionServerSourceFactory,
CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class));
}
@Test(expected = RuntimeException.class)
public void testNoGetRegionServerMetricsSourceImpl() throws Exception {
// This should throw an exception because MetricsRegionServerSourceImpl should only
// be created by a factory.
CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceImpl.class);
}
}

View File

@ -0,0 +1,101 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TestMetricsRegionSourceImpl {
@Test
public void testCompareTo() throws Exception {
MetricsRegionServerSourceFactory fact = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
MetricsRegionSource one = fact.createRegion(new RegionWrapperStub("TEST"));
MetricsRegionSource oneClone = fact.createRegion(new RegionWrapperStub("TEST"));
MetricsRegionSource two = fact.createRegion(new RegionWrapperStub("TWO"));
assertEquals(0, one.compareTo(oneClone));
assertTrue(one.compareTo(two) < 0);
assertTrue(two.compareTo(one) > 0);
}
@Test(expected = RuntimeException.class)
public void testNoGetRegionServerMetricsSourceImpl() throws Exception {
// This should throw an exception because MetricsRegionSourceImpl should only
// be created by a factory.
CompatibilitySingletonFactory.getInstance(MetricsRegionSource.class);
}
class RegionWrapperStub implements MetricsRegionWrapper {
private String regionName;
public RegionWrapperStub(String regionName) {
this.regionName = regionName;
}
@Override
public String getTableName() {
return null; // not needed for these tests
}
@Override
public String getRegionName() {
return this.regionName;
}
@Override
public long getNumStores() {
return 0; // not needed for these tests
}
@Override
public long getNumStoreFiles() {
return 0; // not needed for these tests
}
@Override
public long getMemstoreSize() {
return 0; // not needed for these tests
}
@Override
public long getStoreFileSize() {
return 0; // not needed for these tests
}
@Override
public long getReadRequestCount() {
return 0; // not needed for these tests
}
@Override
public long getWriteRequestCount() {
return 0; // not needed for these tests
}
}
}

View File

@ -16,20 +16,22 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.replication.regionserver.metrics;
package org.apache.hadoop.hbase.replication.regionserver;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource;
import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
/** Test for ReplicationMetricsSourceImpl */
public class TestReplicationMetricsSourceImpl {
/** Test for MetricsReplicationSourceImpl */
public class TestMetricsReplicationSourceImpl {
@Test
public void testGetInstance() throws Exception {
ReplicationMetricsSource rms = CompatibilitySingletonFactory
.getInstance(ReplicationMetricsSource.class);
assertTrue(rms instanceof ReplicationMetricsSourceImpl);
MetricsReplicationSource rms = CompatibilitySingletonFactory
.getInstance(MetricsReplicationSource.class);
assertTrue(rms instanceof MetricsReplicationSourceImpl);
}
}

View File

@ -16,23 +16,25 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.rest.metrics;
package org.apache.hadoop.hbase.rest;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.rest.MetricsRESTSource;
import org.apache.hadoop.hbase.rest.MetricsRESTSourceImpl;
import org.junit.Test;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
/**
* Test for hadoop 2's version of RESTMetricsSource
* Test for hadoop 2's version of MetricsRESTSource
*/
public class TestRESTMetricsSourceImpl {
public class TestMetricsRESTSourceImpl {
@Test
public void ensureCompatRegistered() throws Exception {
assertNotNull(CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class));
assertTrue(CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class) instanceof RESTMetricsSourceImpl);
assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class));
assertTrue(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class) instanceof MetricsRESTSourceImpl);
}
}

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.hbase.test;
import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
import org.apache.hadoop.hbase.metrics.BaseSource;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsInfo;
@ -129,68 +129,68 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
}
@Override
public void assertTag(String name, String expected, BaseMetricsSource source) {
public void assertTag(String name, String expected, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertEquals("Tags should be equal", expected, tags.get(cName));
}
@Override
public void assertGauge(String name, long expected, BaseMetricsSource source) {
public void assertGauge(String name, long expected, BaseSource source) {
long found = getGaugeLong(name, source);
assertEquals("Metrics Should be equal", (long) Long.valueOf(expected), found);
}
@Override
public void assertGaugeGt(String name, long expected, BaseMetricsSource source) {
public void assertGaugeGt(String name, long expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + " (" + found + ") should be greater than " + expected, found > expected);
}
@Override
public void assertGaugeLt(String name, long expected, BaseMetricsSource source) {
public void assertGaugeLt(String name, long expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
public void assertGauge(String name, double expected, BaseMetricsSource source) {
public void assertGauge(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertEquals("Metrics Should be equal", (double) Double.valueOf(expected), found);
assertEquals("Metrics Should be equal", (double) Double.valueOf(expected), found, 0.01);
}
@Override
public void assertGaugeGt(String name, double expected, BaseMetricsSource source) {
public void assertGaugeGt(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be greater than " + expected, found > expected);
}
@Override
public void assertGaugeLt(String name, double expected, BaseMetricsSource source) {
public void assertGaugeLt(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
public void assertCounter(String name, long expected, BaseMetricsSource source) {
public void assertCounter(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertEquals("Metrics Counters should be equal", (long) Long.valueOf(expected), found);
}
@Override
public void assertCounterGt(String name, long expected, BaseMetricsSource source) {
public void assertCounterGt(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertTrue(name + " (" + found + ") should be greater than " + expected, found > expected);
}
@Override
public void assertCounterLt(String name, long expected, BaseMetricsSource source) {
public void assertCounterLt(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
public long getCounter(String name, BaseMetricsSource source) {
public long getCounter(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull(counters.get(cName));
@ -198,7 +198,7 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
}
@Override
public double getGaugeDouble(String name, BaseMetricsSource source) {
public double getGaugeDouble(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull(gauges.get(cName));
@ -206,7 +206,7 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
}
@Override
public long getGaugeLong(String name, BaseMetricsSource source) {
public long getGaugeLong(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull(gauges.get(cName));
@ -220,12 +220,12 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
counters.clear();
}
private void getMetrics(BaseMetricsSource source) {
private void getMetrics(BaseSource source) {
reset();
if (!(source instanceof BaseMetricsSourceImpl)) {
if (!(source instanceof BaseSourceImpl)) {
assertTrue(false);
}
BaseMetricsSourceImpl impl = (BaseMetricsSourceImpl) source;
BaseSourceImpl impl = (BaseSourceImpl) source;
impl.getMetrics(new MockMetricsBuilder(), true);
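One behavioral fix above is easy to miss: assertGauge(String, double, BaseSource) now passes a 0.01 delta, since JUnit's two-argument assertEquals(double, double) is deprecated and unreliable; IEEE-754 doubles produced by different code paths rarely compare bit-for-bit. A minimal illustration:

import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class DoubleDeltaExample {
  @Test
  public void sumWithinTolerance() {
    // 0.1 + 0.2 == 0.30000000000000004, not 0.3, so an exact comparison fails;
    // the tolerance makes the assertion stable.
    assertEquals(0.3, 0.1 + 0.2, 0.01);
  }
}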

View File

@ -16,9 +16,11 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.thrift.metrics;
package org.apache.hadoop.hbase.thrift;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory;
import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl;
import org.junit.Test;
import static org.junit.Assert.assertNotNull;
@ -26,28 +28,28 @@ import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
/**
* Test for hadoop 2's version of ThriftServerMetricsSourceFactory
* Test for hadoop 2's version of MetricsThriftServerSourceFactory
*/
public class TestThriftServerMetricsSourceFactoryImpl {
public class TestMetricsThriftServerSourceFactoryImpl {
@Test
public void testCompatabilityRegistered() throws Exception {
assertNotNull(CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class));
assertTrue(CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class) instanceof ThriftServerMetricsSourceFactoryImpl);
assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class));
assertTrue(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class) instanceof MetricsThriftServerSourceFactoryImpl);
}
@Test
public void testCreateThriftOneSource() throws Exception {
//Make sure that the factory gives back a singleton.
assertSame(new ThriftServerMetricsSourceFactoryImpl().createThriftOneSource(),
new ThriftServerMetricsSourceFactoryImpl().createThriftOneSource());
assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftOneSource(),
new MetricsThriftServerSourceFactoryImpl().createThriftOneSource());
}
@Test
public void testCreateThriftTwoSource() throws Exception {
//Make sure that the factory gives back a singleton.
assertSame(new ThriftServerMetricsSourceFactoryImpl().createThriftTwoSource(),
new ThriftServerMetricsSourceFactoryImpl().createThriftTwoSource());
assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource(),
new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource());
}
}

View File

@ -24,7 +24,6 @@ String format = "html";
<%import>
java.util.*;
org.apache.hadoop.hbase.regionserver.HRegionServer;
org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
org.apache.hadoop.hbase.util.Bytes;
org.apache.hadoop.hbase.HRegionInfo;
org.apache.hadoop.hbase.ServerName;
@ -38,7 +37,6 @@ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
<%java return; %>
</%if>
<%java>
RegionServerMetrics metrics = regionServer.getMetrics();
ServerInfo serverInfo = ProtobufUtil.getServerInfo(regionServer);
ServerName serverName = ProtobufUtil.toServerName(serverInfo.getServerName());
List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(regionServer);
@ -98,7 +96,7 @@ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
</div>
<h2>Server Metrics</h2>
<& ServerMetricsTmpl; metrics = metrics; &>
<& ServerMetricsTmpl; mWrap = regionServer.getMetrics().getRegionServerWrapper(); &>
<& ../common/TaskMonitorTmpl; filter = filter &>

View File

@ -23,7 +23,6 @@
<%import>
java.util.*;
org.apache.hadoop.hbase.regionserver.HRegionServer;
org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
org.apache.hadoop.hbase.util.Bytes;
org.apache.hadoop.hbase.HRegionInfo;
org.apache.hadoop.hbase.ServerName;

View File

@ -17,12 +17,12 @@ See the License for the specific language governing permissions and
limitations under the License.
</%doc>
<%args>
RegionServerMetrics metrics;
MetricsRegionServerWrapper mWrap;
</%args>
<%import>
java.util.*;
org.apache.hadoop.hbase.regionserver.HRegionServer;
org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapper;
org.apache.hadoop.hbase.util.Bytes;
org.apache.hadoop.hbase.HRegionInfo;
org.apache.hadoop.hbase.ServerName;
@ -42,36 +42,32 @@ java.lang.management.ManagementFactory;
<li class=""><a href="#storeStats" data-toggle="tab">Storefiles</a></li>
<li class=""><a href="#queueStats" data-toggle="tab">Queues</a></li>
<li class=""><a href="#blockCacheStats" data-toggle="tab">Block Cache</a></li>
<li class=""><a href="#latencyStats" data-toggle="tab">Latency</a></li>
</ul>
<div class="tab-content" style="padding-bottom: 9px; border-bottom: 1px solid #ddd;">
<div class="tab-pane active" id="baseStats">
<& baseStats; metrics = metrics; &>
<& baseStats; mWrap = mWrap &>
</div>
<div class="tab-pane" id="memoryStats">
<& memoryStats; metrics = metrics; &>
<& memoryStats; mWrap = mWrap &>
</div>
<div class="tab-pane" id="requestStats">
<& requestStats; metrics = metrics; &>
<& requestStats; mWrap = mWrap &>
</div>
<div class="tab-pane" id="storeStats">
<& storeStats; metrics = metrics; &>
<& storeStats; mWrap = mWrap &>
</div>
<div class="tab-pane" id="queueStats">
<& queueStats; metrics = metrics; &>
<& queueStats; mWrap = mWrap &>
</div>
<div class="tab-pane" id="blockCacheStats">
<& blockCacheStats; metrics = metrics; &>
</div>
<div class="tab-pane" id="latencyStats">
<& latencyStats; metrics = metrics; &>
<& blockCacheStats; mWrap = mWrap &>
</div>
</div>
</div>
<%def baseStats>
<%args>
RegionServerMetrics metrics;
MetricsRegionServerWrapper mWrap;
</%args>
<table class="table table-striped">
<tr>
@ -82,17 +78,17 @@ java.lang.management.ManagementFactory;
<th>Slow HLog Append Count</th>
</tr>
<tr>
<td><% metrics.requests.getPreviousIntervalValue() %></td>
<td><% metrics.regions.get() %></td>
<td><% metrics.hdfsBlocksLocalityIndex.get() %></td>
<td><% metrics.slowHLogAppendCount.get() %></td>
<td><% mWrap.getRequestsPerSecond() %></td>
<td><% mWrap.getNumOnlineRegions() %></td>
<td><% mWrap.getPercentFileLocal() %></td>
<td><% 0 %></td>
</tr>
</table>
</%def>
<%def memoryStats>
<%args>
RegionServerMetrics metrics;
MetricsRegionServerWrapper mWrap;
</%args>
<table class="table table-striped">
<tr>
@ -104,19 +100,19 @@ java.lang.management.ManagementFactory;
</tr>
<tr>
<td>
<% ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getUsed() / (1024*1024) %>MB
<% ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getUsed() %>
</td>
<td>
<% ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax() / (1024*1024) %>MB
<% ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax()%>
</td>
<td><% metrics.memstoreSizeMB.get()%>MB</td>
<td><% mWrap.getMemstoreSize() %></td>
</tr>
</table>
</%def>
<%def storeStats>
<%args>
RegionServerMetrics metrics;
MetricsRegionServerWrapper mWrap;
</%args>
<table class="table table-striped">
<tr>
@ -128,11 +124,11 @@ java.lang.management.ManagementFactory;
<th>Bloom Size</th>
</tr>
<tr>
<td><% metrics.stores.get() %></td>
<td><% metrics.storefiles.get() %></td>
<td><% metrics.rootIndexSizeKB.get() %>KB</td>
<td><% metrics.totalStaticIndexSizeKB.get() %>KB</td>
<td><% metrics.totalStaticBloomSizeKB.get() %>KB</td>
<td><% mWrap.getNumStores() %></td>
<td><% mWrap.getNumStoreFiles() %></td>
<td><% mWrap.getStoreFileIndexSize() %></td>
<td><% mWrap.getTotalStaticIndexSize() %></td>
<td><% mWrap.getTotalStaticBloomSize() %></td>
</tr>
</table>
</%def>
@ -140,7 +136,7 @@ java.lang.management.ManagementFactory;
<%def requestStats>
<%args>
RegionServerMetrics metrics;
MetricsRegionServerWrapper mWrap;
</%args>
<table class="table table-striped">
<tr>
@ -149,16 +145,16 @@ java.lang.management.ManagementFactory;
<th>Write Request Count</th>
</tr>
<tr>
<td><% metrics.requests.getPreviousIntervalValue() %></td>
<td><% metrics.readRequestsCount.get() %></td>
<td><% metrics.writeRequestsCount.get() %>KB</td>
<td><% mWrap.getRequestsPerSecond() %></td>
<td><% mWrap.getReadRequestsCount() %></td>
<td><% mWrap.getWriteRequestsCount() %></td>
</tr>
</table>
</%def>
<%def queueStats>
<%args>
RegionServerMetrics metrics;
MetricsRegionServerWrapper mWrap;
</%args>
<table class="table table-striped">
<tr>
@ -167,8 +163,8 @@ java.lang.management.ManagementFactory;
</tr>
<tr>
<td><% metrics.compactionQueueSize.get() %></td>
<td><% metrics.flushQueueSize.get() %>KB</td>
<td><% mWrap.getCompactionQueueSize() %></td>
<td><% mWrap.getFlushQueueSize() %></td>
</tr>
</table>
</%def>
@ -176,7 +172,7 @@ java.lang.management.ManagementFactory;
<%def blockCacheStats>
<%args>
RegionServerMetrics metrics;
MetricsRegionServerWrapper mWrap;
</%args>
<table class="table table-striped">
<tr>
@ -190,57 +186,13 @@ java.lang.management.ManagementFactory;
</tr>
<tr>
<td><% metrics.blockCacheSize.get() / (1024*1024) %>MB</td>
<td><% metrics.blockCacheFree.get() / (1024 * 1024) %>MB</td>
<td><% metrics.blockCacheCount.get()%></td>
<td><% metrics.blockCacheHitCount.get()%></td>
<td><% metrics.blockCacheMissCount.get()%></td>
<td><% metrics.blockCacheHitRatio.get()%>%</td>
<td><% metrics.blockCacheEvictedCount.get()%></td>
<td><% mWrap.getBlockCacheSize()%></td>
<td><% mWrap.getBlockCacheFreeSize()%></td>
<td><% mWrap.getBlockCacheCount() %></td>
<td><% mWrap.getBlockCacheHitCount() %></td>
<td><% mWrap.getBlockCacheMissCount() %></td>
<td><% mWrap.getBlockCacheHitPercent() %>%</td>
<td><% mWrap.getBlockCacheEvictedCount() %></td>
</tr>
</table>
</%def>
<%def latencyStats>
<%args>
RegionServerMetrics metrics;
</%args>
<table class="table table-striped">
<tr>
<th>Operation</th>
<th>Count</th>
<th>Mean</th>
<th>Median</th>
<th>75th</th>
<th>95th</th>
<th>99th</th>
<th>99.9th</th>
</tr>
<& histogramRow; op ="FS Read"; histo = metrics.fsReadLatencyHistogram &>
<& histogramRow; op ="FS PRead"; histo = metrics.fsPreadLatencyHistogram &>
<& histogramRow; op ="FS Write"; histo = metrics.fsWriteLatencyHistogram &>
</table>
</%def>
<%def histogramRow>
<%args>
String op;
MetricsHistogram histo;
</%args>
<%java>
Snapshot s = histo.getSnapshot();
</%java>
<tr>
<td><% op %></td>
<td><% histo.getCount()%></td>
<td><% String.format("%10.2f", histo.getMean()) %></td>
<td><% String.format("%10.2f", s.getMedian()) %></td>
<td><% String.format("%10.2f", s.get75thPercentile()) %></td>
<td><% String.format("%10.2f", s.get95thPercentile()) %></td>
<td><% String.format("%10.2f", s.get99thPercentile()) %></td>
<td><% String.format("%10.2f", s.get999thPercentile())%></td>
</tr>
</%def>

View File

@ -30,15 +30,13 @@ import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
import org.apache.hadoop.io.RawComparator;
/**
* Common functionality needed by all versions of {@link HFile} readers.
*/
@InterfaceAudience.Private
public abstract class AbstractHFileReader extends SchemaConfigured
implements HFile.Reader {
public abstract class AbstractHFileReader implements HFile.Reader {
/** Filesystem-level block reader for this HFile format version. */
protected HFileBlock.FSReader fsBlockReader;
@ -119,7 +117,6 @@ public abstract class AbstractHFileReader extends SchemaConfigured
final long fileSize,
final boolean closeIStream,
final CacheConfig cacheConf, final HFileSystem hfs) {
super(null, path);
this.trailer = trailer;
this.compressAlgo = trailer.getCompressionCodec();
this.cacheConf = cacheConf;

View File

@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.io.RawComparator;
@ -44,8 +43,7 @@ import org.apache.hadoop.io.Writable;
* Common functionality needed by all versions of {@link HFile} writers.
*/
@InterfaceAudience.Private
public abstract class AbstractHFileWriter extends SchemaConfigured
implements HFile.Writer {
public abstract class AbstractHFileWriter implements HFile.Writer {
/** Key previously appended. Becomes the last key in the file. */
protected byte[] lastKeyBuffer = null;
@ -116,7 +114,6 @@ public abstract class AbstractHFileWriter extends SchemaConfigured
Compression.Algorithm compressAlgo,
HFileDataBlockEncoder dataBlockEncoder,
KeyComparator comparator) {
super(null, path);
this.outputStream = outputStream;
this.path = path;
this.name = path != null ? path.getName() : outputStream.toString();

View File

@ -23,7 +23,6 @@ import java.nio.ByteBuffer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
/**
* Cacheable is an interface that allows for an object to be cached. If using an
@ -57,14 +56,4 @@ public interface Cacheable extends HeapSize {
*/
public CacheableDeserializer<Cacheable> getDeserializer();
/**
* @return the block type of this cached HFile block
*/
public BlockType getBlockType();
/**
* @return the metrics object identified by table and column family
*/
public SchemaMetrics getSchemaMetrics();
}

Some files were not shown because too many files have changed in this diff.