diff --git a/dev-support/findbugs-exclude.xml b/dev-support/findbugs-exclude.xml
index 5ab62e43b47..33c3c3c4f6a 100644
--- a/dev-support/findbugs-exclude.xml
+++ b/dev-support/findbugs-exclude.xml
@@ -50,7 +50,7 @@
-
+
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java
index 97e887fe08d..2b2c53d7000 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java
@@ -44,6 +44,7 @@ public class CompatibilitySingletonFactory extends CompatibilityFactory {
*
* @return the singleton
*/
+ @SuppressWarnings("unchecked")
public static synchronized T getInstance(Class klass) {
T instance = (T) instances.get(klass);
if (instance == null) {
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
similarity index 91%
rename from hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSource.java
rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
index 8fcfaf01b0b..1350b0182fa 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
@@ -16,29 +16,29 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.master.metrics;
+package org.apache.hadoop.hbase.master;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
+import org.apache.hadoop.hbase.metrics.BaseSource;
/**
* Interface that classes that expose metrics about the master will implement.
*/
-public interface MasterMetricsSource extends BaseMetricsSource {
+public interface MetricsMasterSource extends BaseSource {
/**
* The name of the metrics
*/
- static final String METRICS_NAME = "HMaster";
+ static final String METRICS_NAME = "Server";
/**
* The context metrics will be under.
*/
- static final String METRICS_CONTEXT = "hmaster";
+ static final String METRICS_CONTEXT = "master";
/**
* The name of the metrics context that metrics will be under in jmx
*/
- static final String METRICS_JMX_CONTEXT = "HMaster";
+ static final String METRICS_JMX_CONTEXT = "Master,sub=" + METRICS_NAME;
/**
* Description
@@ -76,24 +76,28 @@ public interface MasterMetricsSource extends BaseMetricsSource {
/**
* Increment the number of requests the cluster has seen.
+ *
* @param inc Amount to increment the total by.
*/
void incRequests(final int inc);
/**
* Set the number of regions in transition.
+ *
* @param ritCount count of the regions in transition.
*/
void setRIT(int ritCount);
/**
* Set the count of the number of regions that have been in transition over the threshold time.
+ *
* @param ritCountOverThreshold number of regions in transition for longer than threshold.
*/
void setRITCountOverThreshold(int ritCountOverThreshold);
/**
* Set the oldest region in transition.
+ *
* @param age age of the oldest RIT.
*/
void setRITOldestAge(long age);
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java
similarity index 76%
rename from hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactory.java
rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java
index 157b2deaadf..63a85a33528 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactory.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java
@@ -16,13 +16,13 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.master.metrics;
+package org.apache.hadoop.hbase.master;
/**
- * Interface of a factory to create MasterMetricsSource when given a MasterMetricsWrapper
+ * Interface of a factory to create MetricsMasterSource when given a MetricsMasterWrapper
*/
-public interface MasterMetricsSourceFactory {
+public interface MetricsMasterSourceFactory {
- MasterMetricsSource create(MasterMetricsWrapper beanWrapper);
+ MetricsMasterSource create(MetricsMasterWrapper masterWrapper);
}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
similarity index 90%
rename from hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsWrapper.java
rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
index ff416eb637e..838676356f4 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsWrapper.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
@@ -16,13 +16,13 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.master.metrics;
+package org.apache.hadoop.hbase.master;
/**
* This is the interface that will expose information to hadoop1/hadoop2 implementations of the
- * MasterMetricsSource.
+ * MetricsMasterSource.
*/
-public interface MasterMetricsWrapper {
+public interface MetricsMasterWrapper {
/**
* Get ServerName
@@ -31,54 +31,63 @@ public interface MasterMetricsWrapper {
/**
* Get Average Load
+ *
* @return Average Load
*/
double getAverageLoad();
/**
* Get the Cluster ID
+ *
* @return Cluster ID
*/
String getClusterId();
/**
* Get the Zookeeper Quorum Info
+ *
* @return Zookeeper Quorum Info
*/
String getZookeeperQuorum();
/**
* Get the co-processors
+ *
* @return Co-processors
*/
String[] getCoprocessors();
/**
* Get hbase master start time
+ *
* @return Start time of master in milliseconds
*/
- long getMasterStartTime();
+ long getStartTime();
/**
* Get the hbase master active time
+ *
* @return Time in milliseconds when master became active
*/
- long getMasterActiveTime();
+ long getActiveTime();
/**
* Whether this master is the active master
+ *
* @return True if this is the active master
*/
boolean getIsActiveMaster();
/**
* Get the live region servers
+ *
* @return Live region servers
*/
int getRegionServers();
/**
* Get the dead region servers
+ *
* @return Dead region Servers
*/
int getDeadRegionServers();
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java
similarity index 87%
rename from hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSource.java
rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java
index e8cefefc516..20139c4c9d4 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java
@@ -19,9 +19,11 @@
package org.apache.hadoop.hbase.metrics;
/**
- * BaseMetricsSource for dynamic metrics to announce to Metrics2
+ * BaseSource for dynamic metrics to announce to Metrics2
*/
-public interface BaseMetricsSource {
+public interface BaseSource {
+
+ public static final String HBASE_METRICS_SYSTEM_NAME = "HBase";
/**
* Clear out the metrics and re-prepare the source.
@@ -53,11 +55,11 @@ public interface BaseMetricsSource {
void decGauge(String gaugeName, long delta);
/**
- * Remove a gauge and no longer announce it.
+ * Remove a metric and no longer announce it.
*
* @param key Name of the gauge to remove.
*/
- void removeGauge(String key);
+ void removeMetric(String key);
/**
* Add some amount to a counter.
@@ -84,12 +86,4 @@ public interface BaseMetricsSource {
*/
void updateQuantile(String name, long value);
-
- /**
- * Remove a counter and stop announcing it to metrics2.
- *
- * @param key
- */
- void removeCounter(String key);
-
}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java
new file mode 100644
index 00000000000..5e6e27323f9
--- /dev/null
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.metrics.BaseSource;
+
+/**
+ * This interface will be implemented by a MetricsSource that will export metrics from
+ * multiple regions into the hadoop metrics system.
+ */
+public interface MetricsRegionAggregateSource extends BaseSource {
+
+ /**
+ * The name of the metrics
+ */
+ static final String METRICS_NAME = "Regions";
+
+ /**
+ * The name of the metrics context that metrics will be under.
+ */
+ static final String METRICS_CONTEXT = "regionserver";
+
+ /**
+ * Description
+ */
+ static final String METRICS_DESCRIPTION = "Metrics about HBase RegionServer regions and tables";
+
+ /**
+ * The name of the metrics context that metrics will be under in jmx
+ */
+ static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
+
+ /**
+ * Register a MetricsRegionSource as being open.
+ *
+ * @param source the source for the region being opened.
+ */
+ void register(MetricsRegionSource source);
+
+ /**
+ * Remove a region's source. This is called when a region is closed.
+ *
+ * @param source The region to remove.
+ */
+ void deregister(MetricsRegionSource source);
+}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
new file mode 100644
index 00000000000..0ed4fee41ab
--- /dev/null
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -0,0 +1,166 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.metrics.BaseSource;
+
+/**
+ * Interface for classes that expose metrics about the regionserver.
+ */
+public interface MetricsRegionServerSource extends BaseSource {
+
+ /**
+ * The name of the metrics
+ */
+ static final String METRICS_NAME = "Server";
+
+ /**
+ * The name of the metrics context that metrics will be under.
+ */
+ static final String METRICS_CONTEXT = "regionserver";
+
+ /**
+ * Description
+ */
+ static final String METRICS_DESCRIPTION = "Metrics about HBase RegionServer";
+
+ /**
+ * The name of the metrics context that metrics will be under in jmx
+ */
+ static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
+
+ /**
+ * Update the Put time histogram
+ *
+ * @param t time it took
+ */
+ void updatePut(long t);
+
+ /**
+ * Update the Delete time histogram
+ *
+ * @param t time it took
+ */
+ void updateDelete(long t);
+
+ /**
+ * Update the Get time histogram.
+ *
+ * @param t time it took
+ */
+ void updateGet(long t);
+
+ /**
+ * Update the Increment time histogram.
+ *
+ * @param t time it took
+ */
+ void updateIncrement(long t);
+
+ /**
+ * Update the Append time histogram.
+ *
+ * @param t time it took
+ */
+ void updateAppend(long t);
+
+ // Strings used for exporting to metrics system.
+ static final String REGION_COUNT = "regionCount";
+ static final String REGION_COUNT_DESC = "Number of regions";
+ static final String STORE_COUNT = "storeCount";
+ static final String STORE_COUNT_DESC = "Number of Stores";
+ static final String STOREFILE_COUNT = "storeFileCount";
+ static final String STOREFILE_COUNT_DESC = "Number of Store Files";
+ static final String MEMSTORE_SIZE = "memStoreSize";
+ static final String MEMSTORE_SIZE_DESC = "Size of the memstore";
+ static final String STOREFILE_SIZE = "storeFileSize";
+ static final String STOREFILE_SIZE_DESC = "Size of storefiles being served.";
+ static final String TOTAL_REQUEST_COUNT = "totalRequestCount";
+ static final String TOTAL_REQUEST_COUNT_DESC =
+ "Total number of requests this RegionServer has answered.";
+ static final String READ_REQUEST_COUNT = "readRequestCount";
+ static final String READ_REQUEST_COUNT_DESC =
+ "Number of read requests this region server has answered.";
+ static final String WRITE_REQUEST_COUNT = "writeRequestCount";
+ static final String WRITE_REQUEST_COUNT_DESC =
+ "Number of mutation requests this region server has answered.";
+ static final String CHECK_MUTATE_FAILED_COUNT = "checkMutateFailedCount";
+ static final String CHECK_MUTATE_FAILED_COUNT_DESC =
+ "Number of Check and Mutate calls that failed the checks.";
+ static final String CHECK_MUTATE_PASSED_COUNT = "checkMutatePassedCount";
+ static final String CHECK_MUTATE_PASSED_COUNT_DESC =
+ "Number of Check and Mutate calls that passed the checks.";
+ static final String STOREFILE_INDEX_SIZE = "storeFileIndexSize";
+ static final String STOREFILE_INDEX_SIZE_DESC = "Size of indexes in storefiles on disk.";
+ static final String STATIC_INDEX_SIZE = "staticIndexSize";
+ static final String STATIC_INDEX_SIZE_DESC = "Uncompressed size of the static indexes.";
+ static final String STATIC_BLOOM_SIZE = "staticBloomSize";
+ static final String STATIC_BLOOM_SIZE_DESC =
+ "Uncompressed size of the static bloom filters.";
+ static final String NUMBER_OF_PUTS_WITHOUT_WAL = "putsWithoutWALCount";
+ static final String NUMBER_OF_PUTS_WITHOUT_WAL_DESC =
+ "Number of mutations that have been sent by clients with the write ahead logging turned off.";
+ static final String DATA_SIZE_WITHOUT_WAL = "putsWithoutWALSize";
+ static final String DATA_SIZE_WITHOUT_WAL_DESC =
+ "Size of data that has been sent by clients with the write ahead logging turned off.";
+ static final String PERCENT_FILES_LOCAL = "percentFilesLocal";
+ static final String PERCENT_FILES_LOCAL_DESC =
+ "The percent of HFiles that are stored on the local hdfs data node.";
+ static final String COMPACTION_QUEUE_LENGTH = "compactionQueueLength";
+ static final String COMPACTION_QUEUE_LENGTH_DESC = "Length of the queue for compactions.";
+ static final String FLUSH_QUEUE_LENGTH = "flushQueueLength";
+ static final String FLUSH_QUEUE_LENGTH_DESC = "Length of the queue for region flushes";
+ static final String BLOCK_CACHE_FREE_SIZE = "blockCacheFreeSize";
+ static final String BLOCK_CACHE_FREE_DESC =
+ "Size of the block cache that is not occupied.";
+ static final String BLOCK_CACHE_COUNT = "blockCacheCount";
+ static final String BLOCK_CACHE_COUNT_DESC = "Number of block in the block cache.";
+ static final String BLOCK_CACHE_SIZE = "blockCacheSize";
+ static final String BLOCK_CACHE_SIZE_DESC = "Size of the block cache.";
+ static final String BLOCK_CACHE_HIT_COUNT = "blockCacheHitCount";
+ static final String BLOCK_CACHE_HIT_COUNT_DESC = "Count of the hit on the block cache.";
+ static final String BLOCK_CACHE_MISS_COUNT = "blockCacheMissCount";
+ static final String BLOCK_COUNT_MISS_COUNT_DESC =
+ "Number of requests for a block that missed the block cache.";
+ static final String BLOCK_CACHE_EVICTION_COUNT = "blockCacheEvictionCount";
+ static final String BLOCK_CACHE_EVICTION_COUNT_DESC =
+ "Count of the number of blocks evicted from the block cache.";
+ static final String BLOCK_CACHE_HIT_PERCENT = "blockCountHitPercent";
+ static final String BLOCK_CACHE_HIT_PERCENT_DESC =
+ "Percent of block cache requests that are hits";
+ static final String BLOCK_CACHE_EXPRESS_HIT_PERCENT = "blockCacheExpressHitPercent";
+ static final String BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC =
+ "The percent of the time that requests with the cache turned on hit the cache.";
+ static final String RS_START_TIME_NAME = "regionServerStartTime";
+ static final String ZOOKEEPER_QUORUM_NAME = "zookeeperQuorum";
+ static final String SERVER_NAME_NAME = "serverName";
+ static final String CLUSTER_ID_NAME = "clusterId";
+ static final String RS_START_TIME_DESC = "RegionServer Start Time";
+ static final String ZOOKEEPER_QUORUM_DESC = "Zookeeper Quorum";
+ static final String SERVER_NAME_DESC = "Server Name";
+ static final String CLUSTER_ID_DESC = "Cluster Id";
+ static final String UPDATES_BLOCKED_TIME = "updatesBlockedTime";
+ static final String UPDATES_BLOCKED_DESC =
+ "Number of MS updates have been blocked so that the memstore can be flushed.";
+ static final String DELETE_KEY = "delete";
+ static final String GET_KEY = "get";
+ static final String INCREMENT_KEY = "increment";
+ static final String PUT_KEY = "multiput";
+ static final String APPEND_KEY = "append";
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MXBean.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java
similarity index 62%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MXBean.java
rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java
index b0a92c573c0..39203cb7dda 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MXBean.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java
@@ -18,29 +18,24 @@
package org.apache.hadoop.hbase.regionserver;
-import org.apache.hadoop.classification.InterfaceStability.Evolving;
-
/**
- * This is the JMX management interface for HBase Region Server information
+ * Interface of a factory to create Metrics Sources used inside of regionservers.
*/
-@Evolving
-public interface MXBean {
+public interface MetricsRegionServerSourceFactory {
/**
- * Return RegionServer's ServerName
- * @return ServerName
+ * Given a wrapper create a MetricsRegionServerSource.
+ *
+ * @param regionServerWrapper The wrapped region server
+ * @return a Metrics Source.
*/
- public String getServerName();
+ MetricsRegionServerSource createServer(MetricsRegionServerWrapper regionServerWrapper);
/**
- * Get loaded co-processors
- * @return Loaded Co-processors
+ * Create a MetricsRegionSource from a MetricsRegionWrapper.
+ *
+ * @param wrapper The wrapped region
+ * @return A metrics region source
*/
- public String[] getCoprocessors();
-
- /**
- * Get Zookeeper Quorum
- * @return Comma-separated list of Zookeeper Quorum servers
- */
- public String getZookeeperQuorum();
+ MetricsRegionSource createRegion(MetricsRegionWrapper wrapper);
}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
new file mode 100644
index 00000000000..454e286c097
--- /dev/null
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
@@ -0,0 +1,205 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+/**
+ * This is the interface that will expose RegionServer information to hadoop1/hadoop2
+ * implementations of the MetricsRegionServerSource.
+ */
+public interface MetricsRegionServerWrapper {
+
+ /**
+ * Get ServerName
+ */
+ public String getServerName();
+
+ /**
+ * Get the Cluster ID
+ *
+ * @return Cluster ID
+ */
+ public String getClusterId();
+
+ /**
+ * Get the Zookeeper Quorum Info
+ *
+ * @return Zookeeper Quorum Info
+ */
+ public String getZookeeperQuorum();
+
+ /**
+ * Get the co-processors
+ *
+ * @return Co-processors
+ */
+ public String getCoprocessors();
+
+ /**
+ * Get HRegionServer start time
+ *
+ * @return Start time of RegionServer in milliseconds
+ */
+ public long getStartCode();
+
+ /**
+ * The number of online regions
+ */
+ long getNumOnlineRegions();
+
+ /**
+ * Get the number of stores hosted on this region server.
+ */
+ long getNumStores();
+
+ /**
+ * Get the number of store files hosted on this region server.
+ */
+ long getNumStoreFiles();
+
+ /**
+ * Get the size of the memstore on this region server.
+ */
+ long getMemstoreSize();
+
+ /**
+ * Get the total size of the store files this region server is serving from.
+ */
+ long getStoreFileSize();
+
+ /**
+ * Get the number of requests per second.
+ */
+ double getRequestsPerSecond();
+
+ /**
+ * Get the total number of requests per second.
+ */
+ long getTotalRequestCount();
+
+ /**
+ * Get the number of read requests to regions hosted on this region server.
+ */
+ long getReadRequestsCount();
+
+ /**
+ * Get the number of write requests to regions hosted on this region server.
+ */
+ long getWriteRequestsCount();
+
+ /**
+ * Get the number of CAS operations that failed.
+ */
+ long getCheckAndMutateChecksFailed();
+
+ /**
+ * Get the number of CAS operations that passed.
+ */
+ long getCheckAndMutateChecksPassed();
+
+ /**
+ * Get the Size of indexes in storefiles on disk.
+ */
+ long getStoreFileIndexSize();
+
+ /**
+ * Get the size of the static indexes including the roots.
+ */
+ long getTotalStaticIndexSize();
+
+ /**
+ * Get the size of the static bloom filters.
+ */
+ long getTotalStaticBloomSize();
+
+ /**
+ * Number of mutations received with WAL explicitly turned off.
+ */
+ long getNumPutsWithoutWAL();
+
+ /**
+ * Amount of data in the memstore but not in the WAL because mutations explicitly had their
+ * WAL turned off.
+ */
+ long getDataInMemoryWithoutWAL();
+
+ /**
+ * Get the percent of HFiles' that are local.
+ */
+ int getPercentFileLocal();
+
+ /**
+ * Get the size of the compaction queue
+ */
+ int getCompactionQueueSize();
+
+ /**
+ * Get the size of the flush queue.
+ */
+ int getFlushQueueSize();
+
+ /**
+ * Get the size of the block cache that is free.
+ */
+ long getBlockCacheFreeSize();
+
+ /**
+ * Get the number of items in the block cache.
+ */
+ long getBlockCacheCount();
+
+ /**
+ * Get the total size of the block cache.
+ */
+ long getBlockCacheSize();
+
+ /**
+ * Get the count of hits to the block cache
+ */
+ long getBlockCacheHitCount();
+
+ /**
+ * Get the count of misses to the block cache.
+ */
+ long getBlockCacheMissCount();
+
+ /**
+ * Get the number of items evicted from the block cache.
+ */
+ long getBlockCacheEvictedCount();
+
+ /**
+ * Get the percent of all requests that hit the block cache.
+ */
+ int getBlockCacheHitPercent();
+
+ /**
+ * Get the percent of requests with the block cache turned on that hit the block cache.
+ */
+ int getBlockCacheHitCachingPercent();
+
+ /**
+ * Force a re-computation of the metrics.
+ */
+ void forceRecompute();
+
+ /**
+ * Get the amount of time that updates were blocked.
+ */
+ long getUpdatesBlockedTime();
+}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
new file mode 100644
index 00000000000..0bc14c328fb
--- /dev/null
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+
+/**
+ * This interface will be implemented to allow single regions to push metrics into
+ * MetricsRegionAggregateSource that will in turn push data to the Hadoop metrics system.
+ */
+public interface MetricsRegionSource extends Comparable {
+
+ /**
+ * Close the region's metrics as this region is closing.
+ */
+ void close();
+
+ /**
+ * Update related counts of puts.
+ */
+ void updatePut();
+
+ /**
+ * Update related counts of deletes.
+ */
+ void updateDelete();
+
+ /**
+ * Update related counts of gets.
+ */
+ void updateGet();
+
+ /**
+ * Update related counts of increments.
+ */
+ void updateIncrement();
+
+ /**
+ * Update related counts of appends.
+ */
+ void updateAppend();
+
+ /**
+ * Get the aggregate source to which this reports.
+ */
+ MetricsRegionAggregateSource getAggregateSource();
+}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
new file mode 100644
index 00000000000..2c533ea871d
--- /dev/null
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+/**
+ * Interface of class that will wrap an HRegion and export numbers so they can be
+ * used in MetricsRegionSource
+ */
+public interface MetricsRegionWrapper {
+
+ /**
+ * Get the name of the table the region belongs to.
+ *
+ * @return The string version of the table name.
+ */
+ String getTableName();
+
+ /**
+ * Get the name of the region.
+ *
+ * @return The encoded name of the region.
+ */
+ String getRegionName();
+
+ /**
+ * Get the number of stores in this region.
+ */
+ long getNumStores();
+
+ /**
+ * Get the number of store files in this region.
+ */
+ long getNumStoreFiles();
+
+ /**
+ * Get the size of the memstore in this region.
+ */
+ long getMemstoreSize();
+
+ /**
+ * Get the total size of the store files in this region.
+ */
+ long getStoreFileSize();
+
+ /**
+ * Get the total number of read requests that have been issued against this region
+ */
+ long getReadRequestCount();
+
+ /**
+ * Get the total number of mutations that have been issued against this region.
+ */
+ long getWriteRequestCount();
+
+}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java
similarity index 76%
rename from hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSource.java
rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java
index 0090f495341..5b79a3977a4 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java
@@ -16,29 +16,29 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.replication.regionserver.metrics;
+package org.apache.hadoop.hbase.replication.regionserver;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
+import org.apache.hadoop.hbase.metrics.BaseSource;
/**
* Provides access to gauges and counters. Implementers will hide the details of hadoop1 or
* hadoop2's metrics2 classes and publishing.
*/
-public interface ReplicationMetricsSource extends BaseMetricsSource {
+public interface MetricsReplicationSource extends BaseSource {
/**
* The name of the metrics
*/
- static final String METRICS_NAME = "ReplicationMetrics";
+ static final String METRICS_NAME = "Replication";
/**
* The name of the metrics context that metrics will be under.
*/
- static final String METRICS_CONTEXT = "replicationmetrics";
+ static final String METRICS_CONTEXT = "regionserver";
/**
* The name of the metrics context that metrics will be under.
*/
- static final String METRICS_JMX_CONTEXT = "ReplicationMetrics";
+ static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
/**
* A description.
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java
similarity index 90%
rename from hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSource.java
rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java
index 9d7f6914dc4..aa43f35fedb 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java
@@ -16,20 +16,20 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.rest.metrics;
+package org.apache.hadoop.hbase.rest;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
+import org.apache.hadoop.hbase.metrics.BaseSource;
/**
* Interface of the Metrics Source that will export data to Hadoop's Metrics2 system.
*/
-public interface RESTMetricsSource extends BaseMetricsSource {
+public interface MetricsRESTSource extends BaseSource {
- public static String METRICS_NAME = "Rest";
+ public static String METRICS_NAME = "REST";
public static String CONTEXT = "rest";
- public static String JMX_CONTEXT = "Rest";
+ public static String JMX_CONTEXT = "REST";
public static String METRICS_DESCRIPTION = "Metrics about the HBase REST server";
@@ -49,42 +49,49 @@ public interface RESTMetricsSource extends BaseMetricsSource {
/**
* Increment the number of requests
+ *
* @param inc Ammount to increment by
*/
void incrementRequests(int inc);
/**
* Increment the number of successful Get requests.
+ *
* @param inc Number of successful get requests.
*/
void incrementSucessfulGetRequests(int inc);
/**
* Increment the number of successful Put requests.
+ *
* @param inc Number of successful put requests.
*/
void incrementSucessfulPutRequests(int inc);
/**
* Increment the number of successful Delete requests.
+ *
* @param inc
*/
void incrementSucessfulDeleteRequests(int inc);
/**
* Increment the number of failed Put Requests.
+ *
* @param inc Number of failed Put requests.
*/
void incrementFailedPutRequests(int inc);
/**
* Increment the number of failed Get requests.
+ *
* @param inc The number of failed Get Requests.
*/
void incrementFailedGetRequests(int inc);
/**
* Increment the number of failed Delete requests.
+ *
* @param inc The number of failed delete requests.
*/
void incrementFailedDeleteRequests(int inc);
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java
similarity index 92%
rename from hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSource.java
rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java
index f6ba023b07c..206154fdb46 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java
@@ -16,14 +16,14 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.thrift.metrics;
+package org.apache.hadoop.hbase.thrift;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
+import org.apache.hadoop.hbase.metrics.BaseSource;
/**
* Inteface of a class that will export metrics about Thrift to hadoop's metrics2.
*/
-public interface ThriftServerMetricsSource extends BaseMetricsSource {
+public interface MetricsThriftServerSource extends BaseSource {
static final String BATCH_GET_KEY = "batchGet";
static final String BATCH_MUTATE_KEY = "batchMutate";
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java
similarity index 81%
rename from hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactory.java
rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java
index be6b5f9e7ab..8fca2cf3ce8 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactory.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java
@@ -16,10 +16,10 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.thrift.metrics;
+package org.apache.hadoop.hbase.thrift;
/** Factory that will be used to create metrics sources for the two diffent types of thrift servers. */
-public interface ThriftServerMetricsSourceFactory {
+public interface MetricsThriftServerSourceFactory {
static final String METRICS_NAME = "Thrift";
static final String METRICS_DESCRIPTION = "Thrift Server Metrics";
@@ -28,8 +28,10 @@ public interface ThriftServerMetricsSourceFactory {
static final String THRIFT_TWO_METRICS_CONTEXT = "thrift-two";
static final String THRIFT_TWO_JMX_CONTEXT = "Thrift,sub=ThriftTwo";
- ThriftServerMetricsSource createThriftOneSource();
+ /** Create a Source for a thrift one server */
+ MetricsThriftServerSource createThriftOneSource();
- ThriftServerMetricsSource createThriftTwoSource();
+ /** Create a Source for a thrift two server */
+ MetricsThriftServerSource createThriftTwoSource();
}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics/MetricHistogram.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java
similarity index 82%
rename from hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics/MetricHistogram.java
rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java
index c5d6e491d38..f431632a170 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics/MetricHistogram.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java
@@ -16,13 +16,15 @@
* limitations under the License.
*/
-package org.apache.hadoop.metrics;
+package org.apache.hadoop.metrics2;
/**
- *
+ * Metrics Histogram interface. Implementing classes will expose computed
+ * quantile values through the metrics system.
*/
public interface MetricHistogram {
+ //Strings used to create metrics names.
static final String NUM_OPS_METRIC_NAME = "_num_ops";
static final String MIN_METRIC_NAME = "_min";
static final String MAX_METRIC_NAME = "_max";
@@ -32,6 +34,10 @@ public interface MetricHistogram {
static final String NINETY_FIFTH_PERCENTILE_METRIC_NAME = "_95th_percentile";
static final String NINETY_NINETH_PERCENTILE_METRIC_NAME = "_99th_percentile";
+ /**
+ * Add a single value to a histogram's stream of values.
+ * @param value the value to add to the histogram's stream
+ */
void add(long value);
}
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics/MetricsExecutor.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java
similarity index 92%
rename from hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics/MetricsExecutor.java
rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java
index 4094922c3eb..f2ebc94d01e 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics/MetricsExecutor.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java
@@ -16,12 +16,12 @@
* limitations under the License.
*/
-package org.apache.hadoop.metrics;
+package org.apache.hadoop.metrics2;
import java.util.concurrent.ScheduledExecutorService;
/**
- *
+ * ScheduledExecutorService for metrics.
*/
public interface MetricsExecutor {
diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java
similarity index 78%
rename from hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceFactory.java
rename to hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java
index 9f28c491f79..3d83975c10e 100644
--- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceFactory.java
+++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java
@@ -16,20 +16,21 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.master.metrics;
+package org.apache.hadoop.hbase.master;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.master.MetricsMasterSource;
import org.junit.Test;
/**
- * Test for the CompatibilitySingletonFactory and building MasterMetricsSource
+ * Test for the CompatibilitySingletonFactory and building MetricsMasterSource
*/
-public class TestMasterMetricsSourceFactory {
+public class TestMetricsMasterSourceFactory {
@Test(expected=RuntimeException.class)
public void testGetInstanceNoHadoopCompat() throws Exception {
//This should throw an exception because there is no compat lib on the class path.
- CompatibilitySingletonFactory.getInstance(MasterMetricsSource.class);
+ CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class);
}
}
diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java
new file mode 100644
index 00000000000..1326b858e3b
--- /dev/null
+++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory;
+import org.junit.Test;
+
+/**
+ * Test for the CompatibilitySingletonFactory and building MetricsRegionServerSource
+ */
+public class TestMetricsRegionServerSourceFactory {
+
+ @Test(expected=RuntimeException.class)
+ public void testGetInstanceNoHadoopCompat() throws Exception {
+ //This should throw an exception because there is no compat lib on the class path.
+ CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
+
+ }
+}
diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java
similarity index 76%
rename from hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceFactory.java
rename to hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java
index 9378dff80bc..637b6f79f2e 100644
--- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceFactory.java
+++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java
@@ -16,19 +16,20 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.replication.regionserver.metrics;
+package org.apache.hadoop.hbase.replication.regionserver;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource;
import org.junit.Test;
/**
- * Test for the CompatibilitySingletonFactory and building ReplicationMetricsSource
+ * Test for the CompatibilitySingletonFactory and building MetricsReplicationSource
*/
-public class TestReplicationMetricsSourceFactory {
+public class TestMetricsReplicationSourceFactory {
@Test(expected=RuntimeException.class)
public void testGetInstanceNoHadoopCompat() throws Exception {
//This should throw an exception because there is no compat lib on the class path.
- CompatibilitySingletonFactory.getInstance(ReplicationMetricsSource.class);
+ CompatibilitySingletonFactory.getInstance(MetricsReplicationSource.class);
}
}
diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSource.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java
similarity index 85%
rename from hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSource.java
rename to hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java
index e3f18f783f7..0691fa12962 100644
--- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSource.java
+++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java
@@ -16,21 +16,22 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.rest.metrics;
+package org.apache.hadoop.hbase.rest;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.rest.MetricsRESTSource;
import org.junit.Test;
/**
* Test of Rest Metrics Source interface.
*/
-public class TestRESTMetricsSource {
+public class TestMetricsRESTSource {
@Test(expected=RuntimeException.class)
public void testGetInstanceNoHadoopCompat() throws Exception {
//This should throw an exception because there is no compat lib on the class path.
- CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class);
+ CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class);
}
}
diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java
index fc668bf8398..968ab83ac1b 100644
--- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java
+++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hbase.test;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
+import org.apache.hadoop.hbase.metrics.BaseSource;
/** Interface of a class to make assertions about metrics values. */
public interface MetricsAssertHelper {
@@ -28,128 +28,128 @@ public interface MetricsAssertHelper {
*
* @param name The name of the tag.
* @param expected The expected value
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
- public void assertTag(String name, String expected, BaseMetricsSource source);
+ public void assertTag(String name, String expected, BaseSource source);
/**
* Assert that a gauge exists and that it's value is equal to the expected value.
*
* @param name The name of the gauge
* @param expected The expected value of the gauge.
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
- public void assertGauge(String name, long expected, BaseMetricsSource source);
+ public void assertGauge(String name, long expected, BaseSource source);
/**
* Assert that a gauge exists and it's value is greater than a given value
*
* @param name The name of the gauge
* @param expected Value that the gauge is expected to be greater than
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
- public void assertGaugeGt(String name, long expected, BaseMetricsSource source);
+ public void assertGaugeGt(String name, long expected, BaseSource source);
/**
* Assert that a gauge exists and it's value is less than a given value
*
* @param name The name of the gauge
* @param expected Value that the gauge is expected to be less than
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
- public void assertGaugeLt(String name, long expected, BaseMetricsSource source);
+ public void assertGaugeLt(String name, long expected, BaseSource source);
/**
* Assert that a gauge exists and that it's value is equal to the expected value.
*
* @param name The name of the gauge
* @param expected The expected value of the gauge.
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
- public void assertGauge(String name, double expected, BaseMetricsSource source);
+ public void assertGauge(String name, double expected, BaseSource source);
/**
* Assert that a gauge exists and it's value is greater than a given value
*
* @param name The name of the gauge
* @param expected Value that the gauge is expected to be greater than
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
- public void assertGaugeGt(String name, double expected, BaseMetricsSource source);
+ public void assertGaugeGt(String name, double expected, BaseSource source);
/**
* Assert that a gauge exists and it's value is less than a given value
*
* @param name The name of the gauge
* @param expected Value that the gauge is expected to be less than
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
- public void assertGaugeLt(String name, double expected, BaseMetricsSource source);
+ public void assertGaugeLt(String name, double expected, BaseSource source);
/**
* Assert that a counter exists and that it's value is equal to the expected value.
*
* @param name The name of the counter.
* @param expected The expected value
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
- public void assertCounter(String name, long expected, BaseMetricsSource source);
+ public void assertCounter(String name, long expected, BaseSource source);
/**
* Assert that a counter exists and that it's value is greater than the given value.
*
* @param name The name of the counter.
* @param expected The value the counter is expected to be greater than.
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
- public void assertCounterGt(String name, long expected, BaseMetricsSource source);
+ public void assertCounterGt(String name, long expected, BaseSource source);
/**
* Assert that a counter exists and that it's value is less than the given value.
*
* @param name The name of the counter.
* @param expected The value the counter is expected to be less than.
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
- public void assertCounterLt(String name, long expected, BaseMetricsSource source);
+ public void assertCounterLt(String name, long expected, BaseSource source);
/**
* Get the value of a counter.
*
* @param name name of the counter.
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link BaseSource} that will provide the tags,
* gauges, and counters.
* @return long value of the counter.
*/
- public long getCounter(String name, BaseMetricsSource source);
+ public long getCounter(String name, BaseSource source);
/**
* Get the value of a gauge as a double.
*
* @param name name of the gauge.
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link BaseSource} that will provide the tags,
* gauges, and counters.
* @return double value of the gauge.
*/
- public double getGaugeDouble(String name, BaseMetricsSource source);
+ public double getGaugeDouble(String name, BaseSource source);
/**
* Get the value of a gauge as a long.
*
* @param name name of the gauge.
- * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags,
+ * @param source The {@link BaseSource} that will provide the tags,
* gauges, and counters.
* @return long value of the gauge.
*/
- public long getGaugeLong(String name, BaseMetricsSource source);
+ public long getGaugeLong(String name, BaseSource source);
}
diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java
similarity index 78%
rename from hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactory.java
rename to hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java
index b1f253e1db5..bd132bbf760 100644
--- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactory.java
+++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java
@@ -16,21 +16,22 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.thrift.metrics;
+package org.apache.hadoop.hbase.thrift;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory;
import org.junit.Test;
/**
- * Test for the interface of ThriftServerMetricsSourceFactory
+ * Test for the interface of MetricsThriftServerSourceFactory
*/
-public class TestThriftServerMetricsSourceFactory {
+public class TestMetricsThriftServerSourceFactory {
@Test(expected=RuntimeException.class)
public void testGetInstanceNoHadoopCompat() throws RuntimeException {
//This should throw an exception because there is no compat lib on the class path.
- CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class);
+ CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class);
}
}
diff --git a/hbase-hadoop1-compat/pom.xml b/hbase-hadoop1-compat/pom.xml
index eacde23a3f3..6b554716208 100644
--- a/hbase-hadoop1-compat/pom.xml
+++ b/hbase-hadoop1-compat/pom.xml
@@ -97,6 +97,10 @@ limitations under the License.
com.yammer.metricsmetrics-core
+
+ log4j
+ log4j
+ org.apache.hadoophadoop-test
diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java
similarity index 63%
rename from hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryImpl.java
rename to hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java
index 4a170462127..350c39d9152 100644
--- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryImpl.java
+++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java
@@ -16,22 +16,22 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.master.metrics;
+package org.apache.hadoop.hbase.master;
/**
- * Factory to create MasterMetricsSource when given a MasterMetricsWrapper
+ * Factory to create MetricsMasterSource when given a MetricsMasterWrapper
*/
-public class MasterMetricsSourceFactoryImpl implements MasterMetricsSourceFactory {
+public class MetricsMasterSourceFactoryImpl implements MetricsMasterSourceFactory {
private static enum FactoryStorage {
INSTANCE;
- MasterMetricsSource source;
+ MetricsMasterSource masterSource;
}
@Override
- public synchronized MasterMetricsSource create(MasterMetricsWrapper beanWrapper) {
- if (FactoryStorage.INSTANCE.source == null ) {
- FactoryStorage.INSTANCE.source = new MasterMetricsSourceImpl(beanWrapper);
+ public synchronized MetricsMasterSource create(MetricsMasterWrapper masterWrapper) {
+ if (FactoryStorage.INSTANCE.masterSource == null) {
+ FactoryStorage.INSTANCE.masterSource = new MetricsMasterSourceImpl(masterWrapper);
}
- return FactoryStorage.INSTANCE.source;
+ return FactoryStorage.INSTANCE.masterSource;
}
}
diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
similarity index 79%
rename from hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java
rename to hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
index 85c7373b89a..b00fec50160 100644
--- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java
+++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
@@ -16,41 +16,42 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.master.metrics;
+package org.apache.hadoop.hbase.master;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricsBuilder;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong;
import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;
-/** Hadoop1 implementation of MasterMetricsSource. */
-public class MasterMetricsSourceImpl
- extends BaseMetricsSourceImpl implements MasterMetricsSource {
+/**
+ * Hadoop1 implementation of MetricsMasterSource.
+ */
+public class MetricsMasterSourceImpl
+ extends BaseSourceImpl implements MetricsMasterSource {
- private static final Log LOG = LogFactory.getLog(MasterMetricsSourceImpl.class.getName());
+ private static final Log LOG = LogFactory.getLog(MetricsMasterSourceImpl.class.getName());
- MetricMutableCounterLong clusterRequestsCounter;
- MetricMutableGaugeLong ritGauge;
- MetricMutableGaugeLong ritCountOverThresholdGauge;
- MetricMutableGaugeLong ritOldestAgeGauge;
-
- private final MasterMetricsWrapper masterWrapper;
+ private final MetricsMasterWrapper masterWrapper;
+ private MetricMutableCounterLong clusterRequestsCounter;
+ private MetricMutableGaugeLong ritGauge;
+ private MetricMutableGaugeLong ritCountOverThresholdGauge;
+ private MetricMutableGaugeLong ritOldestAgeGauge;
private MetricMutableHistogram splitTimeHisto;
private MetricMutableHistogram splitSizeHisto;
- public MasterMetricsSourceImpl(MasterMetricsWrapper masterWrapper) {
+ public MetricsMasterSourceImpl(MetricsMasterWrapper masterWrapper) {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, masterWrapper);
}
- public MasterMetricsSourceImpl(String metricsName,
+ public MetricsMasterSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext,
- MasterMetricsWrapper masterWrapper) {
+ MetricsMasterWrapper masterWrapper) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
this.masterWrapper = masterWrapper;
}
@@ -102,15 +103,15 @@ public class MasterMetricsSourceImpl
public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
MetricsRecordBuilder metricsRecordBuilder = metricsBuilder.addRecord(metricsName)
- .setContext(metricsContext);
+ .setContext(metricsContext);
// masterWrapper can be null because this function is called inside of init.
if (masterWrapper != null) {
metricsRecordBuilder
.addGauge(MASTER_ACTIVE_TIME_NAME,
- MASTER_ACTIVE_TIME_DESC, masterWrapper.getMasterActiveTime())
+ MASTER_ACTIVE_TIME_DESC, masterWrapper.getActiveTime())
.addGauge(MASTER_START_TIME_NAME,
- MASTER_START_TIME_DESC, masterWrapper.getMasterStartTime())
+ MASTER_START_TIME_DESC, masterWrapper.getStartTime())
.addGauge(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC, masterWrapper.getAverageLoad())
.addGauge(NUM_REGION_SERVERS_NAME,
NUMBER_OF_REGION_SERVERS_DESC, masterWrapper.getRegionServers())
@@ -125,7 +126,7 @@ public class MasterMetricsSourceImpl
String.valueOf(masterWrapper.getIsActiveMaster()));
}
- metricsRegistry.snapshot(metricsRecordBuilder, true);
+ metricsRegistry.snapshot(metricsRecordBuilder, all);
}
}
diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java
similarity index 80%
rename from hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java
rename to hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java
index 0943370b0d6..857768cf5e6 100644
--- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java
+++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java
@@ -19,19 +19,16 @@
package org.apache.hadoop.hbase.metrics;
import org.apache.hadoop.metrics2.MetricsBuilder;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
-import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
-import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong;
-import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;
-import org.apache.hadoop.metrics2.lib.MetricMutableQuantiles;
+import org.apache.hadoop.metrics2.impl.JmxCacheBuster;
+import org.apache.hadoop.metrics2.lib.*;
import org.apache.hadoop.metrics2.source.JvmMetricsSource;
/**
- * Hadoop 1 implementation of BaseMetricsSource (using metrics2 framework)
+ * Hadoop 1 implementation of BaseSource (using metrics2 framework)
*/
-public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource {
+public class BaseSourceImpl implements BaseSource, MetricsSource {
private static enum DefaultMetricsSystemInitializer {
INSTANCE;
@@ -46,8 +43,6 @@ public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource {
}
}
- private static boolean defaultMetricsSystemInited = false;
- public static final String HBASE_METRICS_SYSTEM_NAME = "hbase";
protected final DynamicMetricsRegistry metricsRegistry;
protected final String metricsName;
@@ -55,7 +50,7 @@ public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource {
protected final String metricsContext;
protected final String metricsJmxContext;
- public BaseMetricsSourceImpl(
+ public BaseSourceImpl(
String metricsName,
String metricsDescription,
String metricsContext,
@@ -137,22 +132,15 @@ public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource {
}
/**
- * Remove a named gauge.
+ * Remove a named metric.
*
* @param key
*/
- public void removeGauge(String key) {
+ public void removeMetric(String key) {
metricsRegistry.removeMetric(key);
+ JmxCacheBuster.clearJmxCache();
}
- /**
- * Remove a named counter.
- *
- * @param key
- */
- public void removeCounter(String key) {
- metricsRegistry.removeMetric(key);
- }
/**
* Method to export all the metrics.
@@ -162,14 +150,16 @@ public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource {
*/
@Override
public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
- metricsRegistry.snapshot(metricsBuilder.addRecord(metricsRegistry.name()), all);
+ MetricsRecordBuilder mrb = metricsBuilder.addRecord(metricsName)
+ .setContext(metricsContext);
+ metricsRegistry.snapshot(mrb, all);
}
/**
* Used to get at the DynamicMetricsRegistry.
* @return DynamicMetricsRegistry
*/
- protected DynamicMetricsRegistry getMetricsRegistry() {
+ public DynamicMetricsRegistry getMetricsRegistry() {
return metricsRegistry;
}
}
diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java
new file mode 100644
index 00000000000..658deb4e091
--- /dev/null
+++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.MetricsBuilder;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+
+import java.util.TreeSet;
+
+public class MetricsRegionAggregateSourceImpl extends BaseSourceImpl
+ implements MetricsRegionAggregateSource {
+ private final Log LOG = LogFactory.getLog(this.getClass());
+
+ private final TreeSet regionSources =
+ new TreeSet();
+
+ public MetricsRegionAggregateSourceImpl() {
+ this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
+ }
+
+
+ public MetricsRegionAggregateSourceImpl(String metricsName,
+ String metricsDescription,
+ String metricsContext,
+ String metricsJmxContext) {
+ super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+ }
+
+ @Override
+ public void register(MetricsRegionSource source) {
+ regionSources.add((MetricsRegionSourceImpl) source);
+ }
+
+ @Override
+ public void deregister(MetricsRegionSource source) {
+ regionSources.remove(source);
+ }
+
+ /**
+ * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all
+ * expectations of java programmers. Instead of returning anything Hadoop metrics expects
+ * getMetrics to push the metrics into the metricsBuilder.
+ *
+ * @param metricsBuilder Builder to accept metrics
+ * @param all push all or only changed?
+ */
+ @Override
+ public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
+
+
+ MetricsRecordBuilder mrb = metricsBuilder.addRecord(metricsName)
+ .setContext(metricsContext);
+
+ if (regionSources != null) {
+ for (MetricsRegionSourceImpl regionMetricSource : regionSources) {
+ regionMetricSource.snapshot(mrb, all);
+ }
+ }
+
+
+ metricsRegistry.snapshot(mrb, all);
+ }
+}
diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java
new file mode 100644
index 00000000000..dc4ae6abc7c
--- /dev/null
+++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+/**
+ * Factory to create MetricsRegionServerSource when given a MetricsRegionServerWrapper
+ */
+public class MetricsRegionServerSourceFactoryImpl implements MetricsRegionServerSourceFactory {
+ private static enum FactoryStorage {
+ INSTANCE;
+ private MetricsRegionServerSource serverSource;
+ private MetricsRegionAggregateSourceImpl aggImpl;
+ }
+
+ private synchronized MetricsRegionAggregateSourceImpl getAggregate() {
+ if (FactoryStorage.INSTANCE.aggImpl == null) {
+ FactoryStorage.INSTANCE.aggImpl = new MetricsRegionAggregateSourceImpl();
+ }
+ return FactoryStorage.INSTANCE.aggImpl;
+ }
+
+
+ @Override
+ public synchronized MetricsRegionServerSource createServer(MetricsRegionServerWrapper regionServerWrapper) {
+ if (FactoryStorage.INSTANCE.serverSource == null) {
+ FactoryStorage.INSTANCE.serverSource = new MetricsRegionServerSourceImpl(
+ regionServerWrapper);
+ }
+ return FactoryStorage.INSTANCE.serverSource;
+ }
+
+ @Override
+ public MetricsRegionSource createRegion(MetricsRegionWrapper wrapper) {
+ return new MetricsRegionSourceImpl(wrapper, getAggregate());
+ }
+}
diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
new file mode 100644
index 00000000000..cffb1c14abb
--- /dev/null
+++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.MetricHistogram;
+import org.apache.hadoop.metrics2.MetricsBuilder;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+
+/**
+ * Hadoop1 implementation of MetricsRegionServerSource.
+ */
+public class MetricsRegionServerSourceImpl
+ extends BaseSourceImpl implements MetricsRegionServerSource {
+
+ final MetricsRegionServerWrapper rsWrap;
+ private final MetricHistogram putHisto;
+ private final MetricHistogram deleteHisto;
+ private final MetricHistogram getHisto;
+ private final MetricHistogram incrementHisto;
+ private final MetricHistogram appendHisto;
+
+ public MetricsRegionServerSourceImpl(MetricsRegionServerWrapper rsWrap) {
+ this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, rsWrap);
+ }
+
+ public MetricsRegionServerSourceImpl(String metricsName,
+ String metricsDescription,
+ String metricsContext,
+ String metricsJmxContext,
+ MetricsRegionServerWrapper rsWrap) {
+ super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+ this.rsWrap = rsWrap;
+
+ putHisto = getMetricsRegistry().getHistogram(PUT_KEY);
+ deleteHisto = getMetricsRegistry().getHistogram(DELETE_KEY);
+ getHisto = getMetricsRegistry().getHistogram(GET_KEY);
+ incrementHisto = getMetricsRegistry().getHistogram(INCREMENT_KEY);
+ appendHisto = getMetricsRegistry().getHistogram(APPEND_KEY);
+ }
+
+ @Override
+ public void init() {
+ super.init();
+ }
+
+ @Override
+ public void updatePut(long t) {
+ putHisto.add(t);
+ }
+
+ @Override
+ public void updateDelete(long t) {
+ deleteHisto.add(t);
+ }
+
+ @Override
+ public void updateGet(long t) {
+ getHisto.add(t);
+ }
+
+ @Override
+ public void updateIncrement(long t) {
+ incrementHisto.add(t);
+ }
+
+ @Override
+ public void updateAppend(long t) {
+ appendHisto.add(t);
+ }
+
+ /**
+ * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all
+ * expectations of java programmers. Instead of returning anything Hadoop metrics expects
+ * getMetrics to push the metrics into the metricsBuilder.
+ *
+ * @param metricsBuilder Builder to accept metrics
+ * @param all push all or only changed?
+ */
+ @Override
+ public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
+
+ MetricsRecordBuilder mrb = metricsBuilder.addRecord(metricsName)
+ .setContext(metricsContext);
+
+ // rsWrap can be null because this function is called inside of init.
+ if (rsWrap != null) {
+ mrb.addGauge(REGION_COUNT, REGION_COUNT_DESC, rsWrap.getNumOnlineRegions())
+ .addGauge(STORE_COUNT, STORE_COUNT_DESC, rsWrap.getNumStores())
+ .addGauge(STOREFILE_COUNT, STOREFILE_COUNT_DESC, rsWrap.getNumStoreFiles())
+ .addGauge(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC, rsWrap.getMemstoreSize())
+ .addGauge(STOREFILE_SIZE, STOREFILE_SIZE_DESC, rsWrap.getStoreFileSize())
+ .addGauge(RS_START_TIME_NAME, RS_START_TIME_DESC, rsWrap.getStartCode())
+ .addCounter(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC, rsWrap.getTotalRequestCount())
+ .addCounter(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC, rsWrap.getReadRequestsCount())
+ .addCounter(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC, rsWrap.getWriteRequestsCount())
+ .addCounter(CHECK_MUTATE_FAILED_COUNT,
+ CHECK_MUTATE_FAILED_COUNT_DESC,
+ rsWrap.getCheckAndMutateChecksFailed())
+ .addCounter(CHECK_MUTATE_PASSED_COUNT,
+ CHECK_MUTATE_PASSED_COUNT_DESC,
+ rsWrap.getCheckAndMutateChecksPassed())
+ .addGauge(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC, rsWrap.getStoreFileIndexSize())
+ .addGauge(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC, rsWrap.getTotalStaticIndexSize())
+ .addGauge(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC, rsWrap.getTotalStaticBloomSize())
+ .addGauge(NUMBER_OF_PUTS_WITHOUT_WAL,
+ NUMBER_OF_PUTS_WITHOUT_WAL_DESC,
+ rsWrap.getNumPutsWithoutWAL())
+ .addGauge(DATA_SIZE_WITHOUT_WAL,
+ DATA_SIZE_WITHOUT_WAL_DESC,
+ rsWrap.getDataInMemoryWithoutWAL())
+ .addGauge(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC, rsWrap.getPercentFileLocal())
+ .addGauge(COMPACTION_QUEUE_LENGTH,
+ COMPACTION_QUEUE_LENGTH_DESC,
+ rsWrap.getCompactionQueueSize())
+ .addGauge(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC, rsWrap.getFlushQueueSize())
+ .addGauge(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC, rsWrap.getBlockCacheFreeSize())
+ .addGauge(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC, rsWrap.getBlockCacheCount())
+ .addGauge(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC, rsWrap.getBlockCacheSize())
+ .addCounter(BLOCK_CACHE_HIT_COUNT,
+ BLOCK_CACHE_HIT_COUNT_DESC,
+ rsWrap.getBlockCacheHitCount())
+ .addCounter(BLOCK_CACHE_MISS_COUNT,
+ BLOCK_COUNT_MISS_COUNT_DESC,
+ rsWrap.getBlockCacheMissCount())
+ .addCounter(BLOCK_CACHE_EVICTION_COUNT,
+ BLOCK_CACHE_EVICTION_COUNT_DESC,
+ rsWrap.getBlockCacheEvictedCount())
+ .addGauge(BLOCK_CACHE_HIT_PERCENT,
+ BLOCK_CACHE_HIT_PERCENT_DESC,
+ rsWrap.getBlockCacheHitPercent())
+ .addGauge(BLOCK_CACHE_EXPRESS_HIT_PERCENT,
+ BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC,
+ rsWrap.getBlockCacheHitCachingPercent())
+ .addCounter(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC, rsWrap.getUpdatesBlockedTime())
+ .tag(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC, rsWrap.getZookeeperQuorum())
+ .tag(SERVER_NAME_NAME, SERVER_NAME_DESC, rsWrap.getServerName())
+ .tag(CLUSTER_ID_NAME, CLUSTER_ID_DESC, rsWrap.getClusterId());
+ }
+
+ metricsRegistry.snapshot(mrb, all);
+ }
+
+
+}
diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
new file mode 100644
index 00000000000..ea44d3a7d1c
--- /dev/null
+++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
@@ -0,0 +1,163 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.impl.JmxCacheBuster;
+import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
+
+public class MetricsRegionSourceImpl implements MetricsRegionSource {
+
+ private final MetricsRegionWrapper regionWrapper;
+ private boolean closed = false;
+ private MetricsRegionAggregateSourceImpl agg;
+ private DynamicMetricsRegistry registry;
+ private static final Log LOG = LogFactory.getLog(MetricsRegionSourceImpl.class);
+
+ private String regionNamePrefix;
+ private String regionPutKey;
+ private String regionDeleteKey;
+ private String regionGetKey;
+ private String regionIncrementKey;
+ private String regionAppendKey;
+ private MetricMutableCounterLong regionPut;
+ private MetricMutableCounterLong regionDelete;
+ private MetricMutableCounterLong regionGet;
+ private MetricMutableCounterLong regionIncrement;
+ private MetricMutableCounterLong regionAppend;
+
+ public MetricsRegionSourceImpl(MetricsRegionWrapper regionWrapper,
+ MetricsRegionAggregateSourceImpl aggregate) {
+ this.regionWrapper = regionWrapper;
+ agg = aggregate;
+ agg.register(this);
+
+ LOG.debug("Creating new MetricsRegionSourceImpl for table " +
+ regionWrapper.getTableName() +
+ " " +
+ regionWrapper.getRegionName());
+
+ registry = agg.getMetricsRegistry();
+
+ regionNamePrefix = "table." + regionWrapper.getTableName() + "."
+ + "region." + regionWrapper.getRegionName() + ".";
+
+ String suffix = "Count";
+
+
+ regionPutKey = regionNamePrefix + MetricsRegionServerSource.PUT_KEY + suffix;
+ regionPut = registry.getLongCounter(regionPutKey, 0l);
+
+ regionDeleteKey = regionNamePrefix + MetricsRegionServerSource.DELETE_KEY + suffix;
+ regionDelete = registry.getLongCounter(regionDeleteKey, 0l);
+
+ regionGetKey = regionNamePrefix + MetricsRegionServerSource.GET_KEY + suffix;
+ regionGet = registry.getLongCounter(regionGetKey, 0l);
+
+ regionIncrementKey = regionNamePrefix + MetricsRegionServerSource.INCREMENT_KEY + suffix;
+ regionIncrement = registry.getLongCounter(regionIncrementKey, 0l);
+
+ regionAppendKey = regionNamePrefix + MetricsRegionServerSource.APPEND_KEY + suffix;
+ regionAppend = registry.getLongCounter(regionAppendKey, 0l);
+ }
+
+ @Override
+ public void close() {
+ closed = true;
+ agg.deregister(this);
+
+ LOG.trace("Removing region Metrics: " + regionWrapper.getRegionName());
+ registry.removeMetric(regionPutKey);
+ registry.removeMetric(regionDeleteKey);
+ registry.removeMetric(regionGetKey);
+ registry.removeMetric(regionIncrementKey);
+
+ registry.removeMetric(regionAppendKey);
+
+ JmxCacheBuster.clearJmxCache();
+ }
+
+ @Override
+ public void updatePut() {
+ regionPut.incr();
+ }
+
+ @Override
+ public void updateDelete() {
+ regionDelete.incr();
+ }
+
+ @Override
+ public void updateGet() {
+ regionGet.incr();
+ }
+
+ @Override
+ public void updateIncrement() {
+ regionIncrement.incr();
+ }
+
+ @Override
+ public void updateAppend() {
+ regionAppend.incr();
+ }
+
+ @Override
+ public MetricsRegionAggregateSource getAggregateSource() {
+ return agg;
+ }
+
+ @Override
+ public int compareTo(MetricsRegionSource source) {
+
+ if (!(source instanceof MetricsRegionSourceImpl))
+ return -1;
+
+ MetricsRegionSourceImpl impl = (MetricsRegionSourceImpl) source;
+ return this.regionWrapper.getRegionName()
+ .compareTo(impl.regionWrapper.getRegionName());
+ }
+
+ void snapshot(MetricsRecordBuilder mrb, boolean ignored) {
+ if (closed) return;
+
+ mrb.addGauge(regionNamePrefix + MetricsRegionServerSource.STORE_COUNT,
+ MetricsRegionServerSource.STORE_COUNT_DESC,
+ this.regionWrapper.getNumStores());
+ mrb.addGauge(regionNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT,
+ MetricsRegionServerSource.STOREFILE_COUNT_DESC,
+ this.regionWrapper.getNumStoreFiles());
+ mrb.addGauge(regionNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE,
+ MetricsRegionServerSource.MEMSTORE_SIZE_DESC,
+ this.regionWrapper.getMemstoreSize());
+ mrb.addGauge(regionNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE,
+ MetricsRegionServerSource.STOREFILE_SIZE_DESC,
+ this.regionWrapper.getStoreFileSize());
+ mrb.addCounter(regionNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT,
+ MetricsRegionServerSource.READ_REQUEST_COUNT_DESC,
+ this.regionWrapper.getReadRequestCount());
+ mrb.addCounter(regionNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT,
+ MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC,
+ this.regionWrapper.getWriteRequestCount());
+
+ }
+}
diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java
similarity index 75%
rename from hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java
rename to hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java
index 0cb8cf9392d..d8da3b37933 100644
--- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java
+++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java
@@ -16,22 +16,22 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.replication.regionserver.metrics;
+package org.apache.hadoop.hbase.replication.regionserver;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
/**
- * Hadoop1 implementation of ReplicationMetricsSource. This provides access to metrics gauges and
+ * Hadoop1 implementation of MetricsReplicationSource. This provides access to metrics gauges and
* counters.
*/
-public class ReplicationMetricsSourceImpl extends BaseMetricsSourceImpl implements
- ReplicationMetricsSource {
+public class MetricsReplicationSourceImpl extends BaseSourceImpl implements
+ MetricsReplicationSource {
- public ReplicationMetricsSourceImpl() {
+ public MetricsReplicationSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
}
- ReplicationMetricsSourceImpl(String metricsName,
+ MetricsReplicationSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {
diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSourceImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java
similarity index 90%
rename from hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSourceImpl.java
rename to hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java
index eff11dc94f2..c63aa0505b2 100644
--- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSourceImpl.java
+++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java
@@ -16,16 +16,16 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.rest.metrics;
+package org.apache.hadoop.hbase.rest;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
/**
* Hadoop One implementation of a metrics2 source that will export metrics from the Rest server to
* the hadoop metrics2 subsystem.
*/
-public class RESTMetricsSourceImpl extends BaseMetricsSourceImpl implements RESTMetricsSource {
+public class MetricsRESTSourceImpl extends BaseSourceImpl implements MetricsRESTSource {
private MetricMutableCounterLong request;
private MetricMutableCounterLong sucGet;
@@ -35,11 +35,11 @@ public class RESTMetricsSourceImpl extends BaseMetricsSourceImpl implements REST
private MetricMutableCounterLong fPut;
private MetricMutableCounterLong fDel;
- public RESTMetricsSourceImpl() {
+ public MetricsRESTSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, CONTEXT, JMX_CONTEXT);
}
- public RESTMetricsSourceImpl(String metricsName,
+ public MetricsRESTSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {
diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactoryImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java
similarity index 75%
rename from hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactoryImpl.java
rename to hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java
index 803c657752a..8762d65e088 100644
--- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactoryImpl.java
+++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java
@@ -16,13 +16,13 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.thrift.metrics;
+package org.apache.hadoop.hbase.thrift;
/**
* Class used to create metrics sources for Thrift and Thrift2 servers in hadoop 1's compat
* library.
*/
-public class ThriftServerMetricsSourceFactoryImpl implements ThriftServerMetricsSourceFactory {
+public class MetricsThriftServerSourceFactoryImpl implements MetricsThriftServerSourceFactory {
/**
* A singleton used to make sure that only one thrift metrics source per server type is ever
@@ -30,23 +30,23 @@ public class ThriftServerMetricsSourceFactoryImpl implements ThriftServerMetrics
*/
private static enum FactoryStorage {
INSTANCE;
- ThriftServerMetricsSourceImpl thriftOne = new ThriftServerMetricsSourceImpl(METRICS_NAME,
+ MetricsThriftServerSourceImpl thriftOne = new MetricsThriftServerSourceImpl(METRICS_NAME,
METRICS_DESCRIPTION,
THRIFT_ONE_METRICS_CONTEXT,
THRIFT_ONE_JMX_CONTEXT);
- ThriftServerMetricsSourceImpl thriftTwo = new ThriftServerMetricsSourceImpl(METRICS_NAME,
+ MetricsThriftServerSourceImpl thriftTwo = new MetricsThriftServerSourceImpl(METRICS_NAME,
METRICS_DESCRIPTION,
THRIFT_TWO_METRICS_CONTEXT,
THRIFT_TWO_JMX_CONTEXT);
}
@Override
- public ThriftServerMetricsSource createThriftOneSource() {
+ public MetricsThriftServerSource createThriftOneSource() {
return FactoryStorage.INSTANCE.thriftOne;
}
@Override
- public ThriftServerMetricsSource createThriftTwoSource() {
+ public MetricsThriftServerSource createThriftTwoSource() {
return FactoryStorage.INSTANCE.thriftTwo;
}
}
diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java
similarity index 86%
rename from hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceImpl.java
rename to hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java
index 7e5d0c4b6a2..6d57186eb7a 100644
--- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceImpl.java
+++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java
@@ -16,18 +16,17 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.thrift.metrics;
+package org.apache.hadoop.hbase.thrift;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
-import org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSource;
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong;
import org.apache.hadoop.metrics2.lib.MetricMutableStat;
/**
- * Hadoop 1 version of ThriftServerMetricsSource{@link ThriftServerMetricsSource}
+ * Hadoop 1 version of MetricsThriftServerSource{@link MetricsThriftServerSource}
*/
-public class ThriftServerMetricsSourceImpl extends BaseMetricsSourceImpl implements
- ThriftServerMetricsSource {
+public class MetricsThriftServerSourceImpl extends BaseSourceImpl implements
+ MetricsThriftServerSource {
private MetricMutableStat batchGetStat;
@@ -39,7 +38,7 @@ public class ThriftServerMetricsSourceImpl extends BaseMetricsSourceImpl impleme
private MetricMutableGaugeLong callQueueLenGauge;
- public ThriftServerMetricsSourceImpl(String metricsName,
+ public MetricsThriftServerSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {
diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java
new file mode 100644
index 00000000000..1e2eb2f4fa6
--- /dev/null
+++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.impl;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+
+/**
+ * JMX caches the beans that have been exported; even after the values are removed from hadoop's
+ * metrics system the keys and old values will still remain. This class stops and restarts the
+ * Hadoop metrics system, forcing JMX to clear the cache of exported metrics.
+ *
+ * This class needs to be in the o.a.h.metrics2.impl namespace as many of the variables/calls used
+ * are package private.
+ */
+public class JmxCacheBuster {
+ private static final Log LOG = LogFactory.getLog(JmxCacheBuster.class);
+
+ /**
+ * Forces JMX to forget about all previously exported metrics.
+ */
+ public static void clearJmxCache() {
+ LOG.trace("Clearing JMX mbean cache.");
+
+ // This is pretty extreme but it's the best way that
+ // I could find to get metrics to be removed.
+
+ try {
+ DefaultMetricsSystem.INSTANCE.stop();
+ DefaultMetricsSystem.INSTANCE.start();
+ } catch (Exception exception ) {
+ LOG.debug("error clearing the jmx it appears the metrics system hasn't been started", exception);
+ }
+ }
+}
diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
index 04fb2a96dd5..3f0bc47eecf 100644
--- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
+++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
@@ -23,6 +23,8 @@ import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsTag;
@@ -39,6 +41,8 @@ import org.apache.hadoop.metrics2.MetricsTag;
*/
public class DynamicMetricsRegistry {
+ private final Log LOG = LogFactory.getLog(this.getClass());
+
/** key for the context tag */
public static final String CONTEXT_KEY = "context";
/** description for the context tag */
@@ -284,6 +288,7 @@ public class DynamicMetricsRegistry {
* @param all get all the metrics even if the values are not changed.
*/
public void snapshot(MetricsRecordBuilder builder, boolean all) {
+
for (Entry entry : tags()) {
builder.add(entry.getValue());
}
diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableHistogram.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableHistogram.java
index 166af08d8f9..b7c24dd0e38 100644
--- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableHistogram.java
+++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableHistogram.java
@@ -21,9 +21,8 @@ package org.apache.hadoop.metrics2.lib;
import com.yammer.metrics.stats.ExponentiallyDecayingSample;
import com.yammer.metrics.stats.Sample;
import com.yammer.metrics.stats.Snapshot;
-import org.apache.hadoop.metrics.MetricHistogram;
+import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.metrics2.lib.MetricMutable;
import java.util.concurrent.atomic.AtomicLong;
diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java
index 7f4b71b9236..e80095f96fb 100644
--- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java
+++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.metrics2.lib;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.metrics.MetricHistogram;
-import org.apache.hadoop.metrics.MetricsExecutor;
+import org.apache.hadoop.metrics2.MetricHistogram;
+import org.apache.hadoop.metrics2.MetricsExecutor;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.util.MetricQuantile;
import org.apache.hadoop.metrics2.util.MetricSampleQuantiles;
diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java
index 31357581891..d47912c273c 100644
--- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java
+++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.metrics2.lib;
-import org.apache.hadoop.metrics.MetricsExecutor;
+import org.apache.hadoop.metrics2.MetricsExecutor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
diff --git a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory
new file mode 100644
index 00000000000..a5e43e4fcd2
--- /dev/null
+++ b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl
\ No newline at end of file
diff --git a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactory b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactory
deleted file mode 100644
index e81c3dcc43f..00000000000
--- a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactory
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactoryImpl
diff --git a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory
new file mode 100644
index 00000000000..bc2f6430478
--- /dev/null
+++ b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl
diff --git a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource
new file mode 100644
index 00000000000..1e0dd200e6f
--- /dev/null
+++ b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl
\ No newline at end of file
diff --git a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource
deleted file mode 100644
index bb64ad5ba0d..00000000000
--- a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSourceImpl
\ No newline at end of file
diff --git a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource
new file mode 100644
index 00000000000..5a4a8e9c044
--- /dev/null
+++ b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.rest.MetricsRESTSourceImpl
\ No newline at end of file
diff --git a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.metrics.RESTMetricsSource b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.metrics.RESTMetricsSource
deleted file mode 100644
index 9e7a28d7b9b..00000000000
--- a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.metrics.RESTMetricsSource
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.hadoop.hbase.rest.metrics.RESTMetricsSourceImpl
\ No newline at end of file
diff --git a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory
new file mode 100644
index 00000000000..2b5c16338cd
--- /dev/null
+++ b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl
\ No newline at end of file
diff --git a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactory b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactory
deleted file mode 100644
index 62d1c6a9325..00000000000
--- a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactory
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactoryImpl
\ No newline at end of file
diff --git a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor
new file mode 100644
index 00000000000..dc120525ba8
--- /dev/null
+++ b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor
@@ -0,0 +1 @@
+org.apache.hadoop.metrics2.lib.MetricsExecutorImpl
diff --git a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceImpl.java b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java
similarity index 60%
rename from hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceImpl.java
rename to hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java
index fe384d7f405..4cdd60677fe 100644
--- a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceImpl.java
+++ b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java
@@ -16,26 +16,29 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.master.metrics;
+package org.apache.hadoop.hbase.master;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.master.MetricsMasterSource;
+import org.apache.hadoop.hbase.master.MetricsMasterSourceFactory;
+import org.apache.hadoop.hbase.master.MetricsMasterSourceImpl;
import org.junit.Test;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
/**
- * Test for MasterMetricsSourceImpl
+ * Test for MetricsMasterSourceImpl
*/
-public class TestMasterMetricsSourceImpl {
+public class TestMetricsMasterSourceImpl {
@Test
public void testGetInstance() throws Exception {
- MasterMetricsSourceFactory masterMetricsSourceFactory = CompatibilitySingletonFactory
- .getInstance(MasterMetricsSourceFactory.class);
- MasterMetricsSource masterMetricsSource = masterMetricsSourceFactory.create(null);
- assertTrue(masterMetricsSource instanceof MasterMetricsSourceImpl);
- assertSame(masterMetricsSourceFactory, CompatibilitySingletonFactory.getInstance(MasterMetricsSourceFactory.class));
+ MetricsMasterSourceFactory metricsMasterSourceFactory = CompatibilitySingletonFactory
+ .getInstance(MetricsMasterSourceFactory.class);
+ MetricsMasterSource masterSource = metricsMasterSourceFactory.create(null);
+ assertTrue(masterSource instanceof MetricsMasterSourceImpl);
+ assertSame(metricsMasterSourceFactory, CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class));
}
}
diff --git a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseMetricsSourceImplTest.java b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java
similarity index 83%
rename from hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseMetricsSourceImplTest.java
rename to hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java
index 095cb14e04b..400609bd6dc 100644
--- a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseMetricsSourceImplTest.java
+++ b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java
@@ -28,15 +28,15 @@ import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
/**
- * Test of the default BaseMetricsSource implementation for hadoop 1
+ * Test of the default BaseSource implementation for hadoop 1
*/
-public class TestBaseMetricsSourceImplTest {
+public class TestBaseSourceImpl {
- private static BaseMetricsSourceImpl bmsi;
+ private static BaseSourceImpl bmsi;
@BeforeClass
public static void setUp() throws Exception {
- bmsi = new BaseMetricsSourceImpl("TestName", "test description", "testcontext", "TestContext");
+ bmsi = new BaseSourceImpl("TestName", "test description", "testcontext", "TestContext");
}
@Test
@@ -81,17 +81,11 @@ public class TestBaseMetricsSourceImplTest {
}
@Test
- public void testRemoveGauge() throws Exception {
+ public void testRemoveMetric() throws Exception {
bmsi.setGauge("testrm", 100);
- bmsi.removeGauge("testrm");
+ bmsi.removeMetric("testrm");
assertNull(bmsi.metricsRegistry.get("testrm"));
}
- @Test
- public void testRemoveCounter() throws Exception {
- bmsi.incCounters("testrm", 100);
- bmsi.removeCounter("testrm");
- assertNull(bmsi.metricsRegistry.get("testrm"));
- }
}
diff --git a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java
new file mode 100644
index 00000000000..7509bf51dce
--- /dev/null
+++ b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.junit.Test;
+
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test for MetricsRegionServerSourceImpl
+ */
+public class TestMetricsRegionServerSourceImpl {
+
+ @Test
+ public void testGetInstance() throws Exception {
+ MetricsRegionServerSourceFactory metricsRegionServerSourceFactory =
+ CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
+ MetricsRegionServerSource serverSource =
+ metricsRegionServerSourceFactory.createServer(null);
+ assertTrue(serverSource instanceof MetricsRegionServerSourceImpl);
+ assertSame(metricsRegionServerSourceFactory,
+ CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class));
+ }
+
+
+ @Test(expected = RuntimeException.class)
+ public void testNoGetRegionServerMetricsSourceImpl() throws Exception {
+ // This should throw an exception because MetricsRegionServerSourceImpl should only
+ // be created by a factory.
+ CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceImpl.class);
+ }
+
+}
diff --git a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
new file mode 100644
index 00000000000..89c0762e6d9
--- /dev/null
+++ b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class TestMetricsRegionSourceImpl {
+
+ @Test
+ public void testCompareTo() throws Exception {
+ MetricsRegionServerSourceFactory fact = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
+
+ MetricsRegionSource one = fact.createRegion(new RegionWrapperStub("TEST"));
+ MetricsRegionSource oneClone = fact.createRegion(new RegionWrapperStub("TEST"));
+ MetricsRegionSource two = fact.createRegion(new RegionWrapperStub("TWO"));
+
+ assertEquals(0, one.compareTo(oneClone));
+
+ assertTrue( one.compareTo(two) < 0);
+ assertTrue( two.compareTo(one) > 0);
+ }
+
+
+ @Test(expected = RuntimeException.class)
+ public void testNoGetRegionServerMetricsSourceImpl() throws Exception {
+ // This should throw an exception because MetricsRegionSourceImpl should only
+ // be created by a factory.
+ CompatibilitySingletonFactory.getInstance(MetricsRegionSource.class);
+ }
+
+ class RegionWrapperStub implements MetricsRegionWrapper {
+
+ private String regionName;
+
+ public RegionWrapperStub(String regionName) {
+
+
+ this.regionName = regionName;
+ }
+
+ @Override
+ public String getTableName() {
+ return null; //To change body of implemented methods use File | Settings | File Templates.
+ }
+
+ @Override
+ public String getRegionName() {
+ return this.regionName;
+ }
+
+ @Override
+ public long getNumStores() {
+ return 0; //To change body of implemented methods use File | Settings | File Templates.
+ }
+
+ @Override
+ public long getNumStoreFiles() {
+ return 0; //To change body of implemented methods use File | Settings | File Templates.
+ }
+
+ @Override
+ public long getMemstoreSize() {
+ return 0; //To change body of implemented methods use File | Settings | File Templates.
+ }
+
+ @Override
+ public long getStoreFileSize() {
+ return 0; //To change body of implemented methods use File | Settings | File Templates.
+ }
+
+ @Override
+ public long getReadRequestCount() {
+ return 0; //To change body of implemented methods use File | Settings | File Templates.
+ }
+
+ @Override
+ public long getWriteRequestCount() {
+ return 0; //To change body of implemented methods use File | Settings | File Templates.
+ }
+ }
+}
diff --git a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceImpl.java b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationMetricsSourceImpl.java
similarity index 69%
rename from hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceImpl.java
rename to hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationMetricsSourceImpl.java
index 411d5beacd1..dd1c3a70f97 100644
--- a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceImpl.java
+++ b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationMetricsSourceImpl.java
@@ -16,22 +16,24 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.replication.regionserver.metrics;
+package org.apache.hadoop.hbase.replication.regionserver;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
/**
- * Test to make sure that ReplicationMetricsSourceImpl is hooked up to ServiceLoader
+ * Test to make sure that MetricsReplicationSourceImpl is hooked up to ServiceLoader
*/
public class TestReplicationMetricsSourceImpl {
@Test
public void testGetInstance() throws Exception {
- ReplicationMetricsSource rms = CompatibilitySingletonFactory
- .getInstance(ReplicationMetricsSource.class);
- assertTrue(rms instanceof ReplicationMetricsSourceImpl);
+ MetricsReplicationSource rms = CompatibilitySingletonFactory
+ .getInstance(MetricsReplicationSource.class);
+ assertTrue(rms instanceof MetricsReplicationSourceImpl);
}
}
diff --git a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSourceImpl.java b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/rest/TestRESTMetricsSourceImpl.java
similarity index 73%
rename from hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSourceImpl.java
rename to hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/rest/TestRESTMetricsSourceImpl.java
index 3f309eba437..30ffd6ea812 100644
--- a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSourceImpl.java
+++ b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/rest/TestRESTMetricsSourceImpl.java
@@ -16,23 +16,25 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.rest.metrics;
+package org.apache.hadoop.hbase.rest;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.rest.MetricsRESTSource;
+import org.apache.hadoop.hbase.rest.MetricsRESTSourceImpl;
import org.junit.Test;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
/**
- * Test for hadoop1's version of RESTMetricsSource
+ * Test for hadoop1's version of MetricsRESTSource
*/
public class TestRESTMetricsSourceImpl {
@Test
public void ensureCompatRegistered() throws Exception {
- assertNotNull(CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class));
- assertTrue(CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class) instanceof RESTMetricsSourceImpl);
+ assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class));
+ assertTrue(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class) instanceof MetricsRESTSourceImpl);
}
}
diff --git a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java
index 346047c623d..a54a3ee25e1 100644
--- a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java
+++ b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.hbase.test;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
+import org.apache.hadoop.hbase.metrics.BaseSource;
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.Metric;
import org.apache.hadoop.metrics2.MetricsBuilder;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
@@ -110,68 +110,68 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
}
@Override
- public void assertTag(String name, String expected, BaseMetricsSource source) {
+ public void assertTag(String name, String expected, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertEquals("Tags should be equal", expected, tags.get(cName));
}
@Override
- public void assertGauge(String name, long expected, BaseMetricsSource source) {
+ public void assertGauge(String name, long expected, BaseSource source) {
long found = getGaugeLong(name, source);
assertEquals("Metrics Should be equal", (long) Long.valueOf(expected), found);
}
@Override
- public void assertGaugeGt(String name, long expected, BaseMetricsSource source) {
+ public void assertGaugeGt(String name, long expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + " (" + found + ") should be greater than " + expected, found > expected);
}
@Override
- public void assertGaugeLt(String name, long expected, BaseMetricsSource source) {
+ public void assertGaugeLt(String name, long expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
- public void assertGauge(String name, double expected, BaseMetricsSource source) {
+ public void assertGauge(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
- assertEquals("Metrics Should be equal", (double) Double.valueOf(expected), found);
+ assertEquals("Metrics Should be equal", (double) Double.valueOf(expected), found, 0.01);
}
@Override
- public void assertGaugeGt(String name, double expected, BaseMetricsSource source) {
+ public void assertGaugeGt(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be greater than " + expected, found > expected);
}
@Override
- public void assertGaugeLt(String name, double expected, BaseMetricsSource source) {
+ public void assertGaugeLt(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
- public void assertCounter(String name, long expected, BaseMetricsSource source) {
+ public void assertCounter(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertEquals("Metrics Counters should be equal", (long) Long.valueOf(expected), found);
}
@Override
- public void assertCounterGt(String name, long expected, BaseMetricsSource source) {
+ public void assertCounterGt(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertTrue(name + " (" + found + ") should be greater than " + expected, found > expected);
}
@Override
- public void assertCounterLt(String name, long expected, BaseMetricsSource source) {
+ public void assertCounterLt(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
- public long getCounter(String name, BaseMetricsSource source) {
+ public long getCounter(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull(counters.get(cName));
@@ -179,7 +179,7 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
}
@Override
- public double getGaugeDouble(String name, BaseMetricsSource source) {
+ public double getGaugeDouble(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull(gauges.get(cName));
@@ -187,7 +187,7 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
}
@Override
- public long getGaugeLong(String name, BaseMetricsSource source) {
+ public long getGaugeLong(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull(gauges.get(cName));
@@ -200,12 +200,12 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
counters.clear();
}
- private void getMetrics(BaseMetricsSource source) {
+ private void getMetrics(BaseSource source) {
reset();
- if (!(source instanceof BaseMetricsSourceImpl)) {
+ if (!(source instanceof BaseSourceImpl)) {
assertTrue(false);
}
- BaseMetricsSourceImpl impl = (BaseMetricsSourceImpl) source;
+ BaseSourceImpl impl = (BaseSourceImpl) source;
impl.getMetrics(new MockMetricsBuilder(), true);
diff --git a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactoryImpl.java b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerMetricsSourceFactoryImpl.java
similarity index 67%
rename from hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactoryImpl.java
rename to hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerMetricsSourceFactoryImpl.java
index c7b362fd2ba..c768399b815 100644
--- a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactoryImpl.java
+++ b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerMetricsSourceFactoryImpl.java
@@ -16,9 +16,11 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.thrift.metrics;
+package org.apache.hadoop.hbase.thrift;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory;
+import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl;
import org.junit.Test;
import static org.junit.Assert.assertNotNull;
@@ -26,28 +28,28 @@ import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
/**
- * Test the hadoop 1 version of ThriftServerMetricsSourceFactory
+ * Test the hadoop 1 version of MetricsThriftServerSourceFactory
*/
public class TestThriftServerMetricsSourceFactoryImpl {
@Test
public void testCompatabilityRegistered() throws Exception {
- assertNotNull(CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class));
- assertTrue(CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class) instanceof ThriftServerMetricsSourceFactoryImpl);
+ assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class));
+ assertTrue(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class) instanceof MetricsThriftServerSourceFactoryImpl);
}
@Test
public void testCreateThriftOneSource() throws Exception {
//Make sure that the factory gives back a singleton.
- assertSame(new ThriftServerMetricsSourceFactoryImpl().createThriftOneSource(),
- new ThriftServerMetricsSourceFactoryImpl().createThriftOneSource());
+ assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftOneSource(),
+ new MetricsThriftServerSourceFactoryImpl().createThriftOneSource());
}
@Test
public void testCreateThriftTwoSource() throws Exception {
//Make sure that the factory gives back a singleton.
- assertSame(new ThriftServerMetricsSourceFactoryImpl().createThriftTwoSource(),
- new ThriftServerMetricsSourceFactoryImpl().createThriftTwoSource());
+ assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource(),
+ new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource());
}
}
diff --git a/hbase-hadoop2-compat/pom.xml b/hbase-hadoop2-compat/pom.xml
index df8e764f5f4..754ea1016a1 100644
--- a/hbase-hadoop2-compat/pom.xml
+++ b/hbase-hadoop2-compat/pom.xml
@@ -138,6 +138,10 @@ limitations under the License.
com.yammer.metricsmetrics-core
+
+ log4j
+ log4j
+
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java
similarity index 63%
rename from hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryImpl.java
rename to hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java
index 4a170462127..350c39d9152 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java
@@ -16,22 +16,22 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.master.metrics;
+package org.apache.hadoop.hbase.master;
/**
- * Factory to create MasterMetricsSource when given a MasterMetricsWrapper
+ * Factory to create MetricsMasterSource when given a MetricsMasterWrapper
*/
-public class MasterMetricsSourceFactoryImpl implements MasterMetricsSourceFactory {
+public class MetricsMasterSourceFactoryImpl implements MetricsMasterSourceFactory {
private static enum FactoryStorage {
INSTANCE;
- MasterMetricsSource source;
+ MetricsMasterSource masterSource;
}
@Override
- public synchronized MasterMetricsSource create(MasterMetricsWrapper beanWrapper) {
- if (FactoryStorage.INSTANCE.source == null ) {
- FactoryStorage.INSTANCE.source = new MasterMetricsSourceImpl(beanWrapper);
+ public synchronized MetricsMasterSource create(MetricsMasterWrapper masterWrapper) {
+ if (FactoryStorage.INSTANCE.masterSource == null) {
+ FactoryStorage.INSTANCE.masterSource = new MetricsMasterSourceImpl(masterWrapper);
}
- return FactoryStorage.INSTANCE.source;
+ return FactoryStorage.INSTANCE.masterSource;
}
}
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
similarity index 79%
rename from hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java
rename to hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
index 90baeddd3ec..ccc060335dc 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
@@ -16,9 +16,9 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.master.metrics;
+package org.apache.hadoop.hbase.master;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.Interns;
@@ -26,39 +26,40 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
import org.apache.hadoop.metrics2.lib.MutableHistogram;
-/** Hadoop2 implementation of MasterMetricsSource. */
-public class MasterMetricsSourceImpl
- extends BaseMetricsSourceImpl implements MasterMetricsSource {
+/**
+ * Hadoop2 implementation of MetricsMasterSource.
+ */
+public class MetricsMasterSourceImpl
+ extends BaseSourceImpl implements MetricsMasterSource {
-
- MutableCounterLong clusterRequestsCounter;
- MutableGaugeLong ritGauge;
- MutableGaugeLong ritCountOverThresholdGauge;
- MutableGaugeLong ritOldestAgeGauge;
- private final MasterMetricsWrapper masterWrapper;
+ private final MetricsMasterWrapper masterWrapper;
+ private MutableCounterLong clusterRequestsCounter;
+ private MutableGaugeLong ritGauge;
+ private MutableGaugeLong ritCountOverThresholdGauge;
+ private MutableGaugeLong ritOldestAgeGauge;
private MutableHistogram splitTimeHisto;
private MutableHistogram splitSizeHisto;
- public MasterMetricsSourceImpl(MasterMetricsWrapper masterMetricsWrapper) {
+ public MetricsMasterSourceImpl(MetricsMasterWrapper masterWrapper) {
this(METRICS_NAME,
METRICS_DESCRIPTION,
METRICS_CONTEXT,
METRICS_JMX_CONTEXT,
- masterMetricsWrapper);
+ masterWrapper);
}
- public MasterMetricsSourceImpl(String metricsName,
+ public MetricsMasterSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext,
- MasterMetricsWrapper masterWrapper) {
+ MetricsMasterWrapper masterWrapper) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
this.masterWrapper = masterWrapper;
}
- @Override
- public void init() {
+ @Override
+ public void init() {
super.init();
clusterRequestsCounter = metricsRegistry.newCounter(CLUSTER_REQUESTS_NAME, "", 0l);
ritGauge = metricsRegistry.newGauge(RIT_COUNT_NAME, "", 0l);
@@ -98,15 +99,15 @@ public class MasterMetricsSourceImpl
public void getMetrics(MetricsCollector metricsCollector, boolean all) {
MetricsRecordBuilder metricsRecordBuilder = metricsCollector.addRecord(metricsName)
- .setContext(metricsContext);
+ .setContext(metricsContext);
// masterWrapper can be null because this function is called inside of init.
if (masterWrapper != null) {
metricsRecordBuilder
.addGauge(Interns.info(MASTER_ACTIVE_TIME_NAME,
- MASTER_ACTIVE_TIME_DESC), masterWrapper.getMasterActiveTime())
+ MASTER_ACTIVE_TIME_DESC), masterWrapper.getActiveTime())
.addGauge(Interns.info(MASTER_START_TIME_NAME,
- MASTER_START_TIME_DESC), masterWrapper.getMasterStartTime())
+ MASTER_START_TIME_DESC), masterWrapper.getStartTime())
.addGauge(Interns.info(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC),
masterWrapper.getAverageLoad())
.addGauge(Interns.info(NUM_REGION_SERVERS_NAME,
@@ -123,7 +124,7 @@ public class MasterMetricsSourceImpl
String.valueOf(masterWrapper.getIsActiveMaster()));
}
- metricsRegistry.snapshot(metricsRecordBuilder, true);
+ metricsRegistry.snapshot(metricsRecordBuilder, all);
}
}
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java
similarity index 90%
rename from hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java
rename to hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java
index 7e37089ca8d..20779512a55 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.metrics;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.impl.JmxCacheBuster;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
import org.apache.hadoop.metrics2.lib.MetricMutableQuantiles;
@@ -29,9 +30,9 @@ import org.apache.hadoop.metrics2.lib.MutableHistogram;
import org.apache.hadoop.metrics2.source.JvmMetrics;
/**
- * Hadoop 2 implementation of BaseMetricsSource (using metrics2 framework)
+ * Hadoop 2 implementation of BaseSource (using metrics2 framework)
*/
-public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource {
+public class BaseSourceImpl implements BaseSource, MetricsSource {
private static enum DefaultMetricsSystemInitializer {
INSTANCE;
@@ -47,15 +48,13 @@ public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource {
}
}
- public static final String HBASE_METRICS_SYSTEM_NAME = "hbase";
-
protected final DynamicMetricsRegistry metricsRegistry;
protected final String metricsName;
protected final String metricsDescription;
protected final String metricsContext;
protected final String metricsJmxContext;
- public BaseMetricsSourceImpl(
+ public BaseSourceImpl(
String metricsName,
String metricsDescription,
String metricsContext,
@@ -141,20 +140,12 @@ public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource {
*
* @param key
*/
- public void removeGauge(String key) {
+ public void removeMetric(String key) {
metricsRegistry.removeMetric(key);
+ JmxCacheBuster.clearJmxCache();
}
- /**
- * Remove a named counter.
- *
- * @param key
- */
- public void removeCounter(String key) {
- metricsRegistry.removeMetric(key);
- }
-
- protected DynamicMetricsRegistry getMetricsRegistry() {
+ public DynamicMetricsRegistry getMetricsRegistry() {
return metricsRegistry;
}
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java
new file mode 100644
index 00000000000..8fea559bde7
--- /dev/null
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+
+import java.util.TreeSet;
+
+public class MetricsRegionAggregateSourceImpl extends BaseSourceImpl
+ implements MetricsRegionAggregateSource {
+
+ private final Log LOG = LogFactory.getLog(this.getClass());
+
+ private final TreeSet regionSources =
+ new TreeSet();
+
+ public MetricsRegionAggregateSourceImpl() {
+ this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
+ }
+
+
+ public MetricsRegionAggregateSourceImpl(String metricsName,
+ String metricsDescription,
+ String metricsContext,
+ String metricsJmxContext) {
+ super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+ }
+
+ @Override
+ public void register(MetricsRegionSource source) {
+ regionSources.add((MetricsRegionSourceImpl) source);
+ }
+
+ @Override
+ public void deregister(MetricsRegionSource source) {
+ regionSources.remove(source);
+ }
+
+ /**
+ * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all
+ * expectations of java programmers. Instead of returning anything Hadoop metrics expects
+ * getMetrics to push the metrics into the collector.
+ *
+ * @param collector the collector
+ * @param all get all the metrics regardless of when they last changed.
+ */
+ @Override
+ public void getMetrics(MetricsCollector collector, boolean all) {
+
+
+ MetricsRecordBuilder mrb = collector.addRecord(metricsName)
+ .setContext(metricsContext);
+
+ if (regionSources != null) {
+ for (MetricsRegionSourceImpl regionMetricSource : regionSources) {
+ regionMetricSource.snapshot(mrb, all);
+ }
+ }
+
+ metricsRegistry.snapshot(mrb, all);
+ }
+}
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java
new file mode 100644
index 00000000000..dc4ae6abc7c
--- /dev/null
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+/**
+ * Factory to create MetricsRegionServerSource when given a MetricsRegionServerWrapper
+ */
+public class MetricsRegionServerSourceFactoryImpl implements MetricsRegionServerSourceFactory {
+ private static enum FactoryStorage {
+ INSTANCE;
+ private MetricsRegionServerSource serverSource;
+ private MetricsRegionAggregateSourceImpl aggImpl;
+ }
+
+ private synchronized MetricsRegionAggregateSourceImpl getAggregate() {
+ if (FactoryStorage.INSTANCE.aggImpl == null) {
+ FactoryStorage.INSTANCE.aggImpl = new MetricsRegionAggregateSourceImpl();
+ }
+ return FactoryStorage.INSTANCE.aggImpl;
+ }
+
+
+ @Override
+ public synchronized MetricsRegionServerSource createServer(MetricsRegionServerWrapper regionServerWrapper) {
+ if (FactoryStorage.INSTANCE.serverSource == null) {
+ FactoryStorage.INSTANCE.serverSource = new MetricsRegionServerSourceImpl(
+ regionServerWrapper);
+ }
+ return FactoryStorage.INSTANCE.serverSource;
+ }
+
+ @Override
+ public MetricsRegionSource createRegion(MetricsRegionWrapper wrapper) {
+ return new MetricsRegionSourceImpl(wrapper, getAggregate());
+ }
+}
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
new file mode 100644
index 00000000000..fe8d0231d1d
--- /dev/null
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.MetricHistogram;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.lib.Interns;
+
+/**
+ * Hadoop1 implementation of MetricsRegionServerSource.
+ */
+public class MetricsRegionServerSourceImpl
+ extends BaseSourceImpl implements MetricsRegionServerSource {
+
+ final MetricsRegionServerWrapper rsWrap;
+ private final MetricHistogram putHisto;
+ private final MetricHistogram deleteHisto;
+ private final MetricHistogram getHisto;
+ private final MetricHistogram incrementHisto;
+ private final MetricHistogram appendHisto;
+
+ public MetricsRegionServerSourceImpl(MetricsRegionServerWrapper rsWrap) {
+ this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, rsWrap);
+ }
+
+ public MetricsRegionServerSourceImpl(String metricsName,
+ String metricsDescription,
+ String metricsContext,
+ String metricsJmxContext,
+ MetricsRegionServerWrapper rsWrap) {
+ super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+ this.rsWrap = rsWrap;
+
+ putHisto = getMetricsRegistry().getHistogram(PUT_KEY);
+ deleteHisto = getMetricsRegistry().getHistogram(DELETE_KEY);
+ getHisto = getMetricsRegistry().getHistogram(GET_KEY);
+ incrementHisto = getMetricsRegistry().getHistogram(INCREMENT_KEY);
+ appendHisto = getMetricsRegistry().getHistogram(APPEND_KEY);
+ }
+
+ @Override
+ public void init() {
+ super.init();
+ }
+
+ @Override
+ public void updatePut(long t) {
+ putHisto.add(t);
+ }
+
+ @Override
+ public void updateDelete(long t) {
+ deleteHisto.add(t);
+ }
+
+ @Override
+ public void updateGet(long t) {
+ getHisto.add(t);
+ }
+
+ @Override
+ public void updateIncrement(long t) {
+ incrementHisto.add(t);
+ }
+
+ @Override
+ public void updateAppend(long t) {
+ appendHisto.add(t);
+ }
+
+ /**
+ * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all
+ * expectations of java programmers. Instead of returning anything Hadoop metrics expects
+ * getMetrics to push the metrics into the collector.
+ *
+ * @param metricsCollector Collector to accept metrics
+ * @param all push all or only changed?
+ */
+ @Override
+ public void getMetrics(MetricsCollector metricsCollector, boolean all) {
+
+ MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName)
+ .setContext(metricsContext);
+
+ // rsWrap can be null because this function is called inside of init.
+ if (rsWrap != null) {
+ mrb.addGauge(Interns.info(REGION_COUNT, REGION_COUNT_DESC), rsWrap.getNumOnlineRegions())
+ .addGauge(Interns.info(STORE_COUNT, STORE_COUNT_DESC), rsWrap.getNumStores())
+ .addGauge(Interns.info(STOREFILE_COUNT, STOREFILE_COUNT_DESC), rsWrap.getNumStoreFiles())
+ .addGauge(Interns.info(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC), rsWrap.getMemstoreSize())
+ .addGauge(Interns.info(STOREFILE_SIZE, STOREFILE_SIZE_DESC), rsWrap.getStoreFileSize())
+ .addGauge(Interns.info(RS_START_TIME_NAME, RS_START_TIME_DESC),
+ rsWrap.getStartCode())
+ .addCounter(Interns.info(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC),
+ rsWrap.getTotalRequestCount())
+ .addCounter(Interns.info(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC),
+ rsWrap.getReadRequestsCount())
+ .addCounter(Interns.info(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC),
+ rsWrap.getWriteRequestsCount())
+ .addCounter(Interns.info(CHECK_MUTATE_FAILED_COUNT, CHECK_MUTATE_FAILED_COUNT_DESC),
+ rsWrap.getCheckAndMutateChecksFailed())
+ .addCounter(Interns.info(CHECK_MUTATE_PASSED_COUNT, CHECK_MUTATE_PASSED_COUNT_DESC),
+ rsWrap.getCheckAndMutateChecksPassed())
+ .addGauge(Interns.info(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC),
+ rsWrap.getStoreFileIndexSize())
+ .addGauge(Interns.info(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC),
+ rsWrap.getTotalStaticIndexSize())
+ .addGauge(Interns.info(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC),
+ rsWrap.getTotalStaticBloomSize())
+ .addGauge(Interns.info(NUMBER_OF_PUTS_WITHOUT_WAL, NUMBER_OF_PUTS_WITHOUT_WAL_DESC),
+ rsWrap.getNumPutsWithoutWAL())
+ .addGauge(Interns.info(DATA_SIZE_WITHOUT_WAL, DATA_SIZE_WITHOUT_WAL_DESC),
+ rsWrap.getDataInMemoryWithoutWAL())
+ .addGauge(Interns.info(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC),
+ rsWrap.getPercentFileLocal())
+ .addGauge(Interns.info(COMPACTION_QUEUE_LENGTH, COMPACTION_QUEUE_LENGTH_DESC),
+ rsWrap.getCompactionQueueSize())
+ .addGauge(Interns.info(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC),
+ rsWrap.getFlushQueueSize())
+ .addGauge(Interns.info(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC),
+ rsWrap.getBlockCacheFreeSize())
+ .addGauge(Interns.info(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC),
+ rsWrap.getBlockCacheCount())
+ .addGauge(Interns.info(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC),
+ rsWrap.getBlockCacheSize())
+ .addCounter(Interns.info(BLOCK_CACHE_HIT_COUNT, BLOCK_CACHE_HIT_COUNT_DESC),
+ rsWrap.getBlockCacheHitCount())
+ .addCounter(Interns.info(BLOCK_CACHE_MISS_COUNT, BLOCK_COUNT_MISS_COUNT_DESC),
+ rsWrap.getBlockCacheMissCount())
+ .addCounter(Interns.info(BLOCK_CACHE_EVICTION_COUNT, BLOCK_CACHE_EVICTION_COUNT_DESC),
+ rsWrap.getBlockCacheEvictedCount())
+ .addGauge(Interns.info(BLOCK_CACHE_HIT_PERCENT, BLOCK_CACHE_HIT_PERCENT_DESC),
+ rsWrap.getBlockCacheHitPercent())
+ .addGauge(Interns.info(BLOCK_CACHE_EXPRESS_HIT_PERCENT,
+ BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC), rsWrap.getBlockCacheHitCachingPercent())
+ .addCounter(Interns.info(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC),
+ rsWrap.getUpdatesBlockedTime())
+ .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC),
+ rsWrap.getZookeeperQuorum())
+ .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), rsWrap.getServerName())
+ .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), rsWrap.getClusterId());
+ }
+
+ metricsRegistry.snapshot(mrb, all);
+ }
+}
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
new file mode 100644
index 00000000000..ad9eb277c16
--- /dev/null
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
@@ -0,0 +1,158 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.impl.JmxCacheBuster;
+import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
+import org.apache.hadoop.metrics2.lib.Interns;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+
+public class MetricsRegionSourceImpl implements MetricsRegionSource {
+
+ private final MetricsRegionWrapper regionWrapper;
+ private boolean closed = false;
+ private MetricsRegionAggregateSourceImpl agg;
+ private DynamicMetricsRegistry registry;
+ private static final Log LOG = LogFactory.getLog(MetricsRegionSourceImpl.class);
+
+ private String regionNamePrefix;
+ private String regionPutKey;
+ private String regionDeleteKey;
+ private String regionGetKey;
+ private String regionIncrementKey;
+ private String regionAppendKey;
+ private MutableCounterLong regionPut;
+ private MutableCounterLong regionDelete;
+ private MutableCounterLong regionGet;
+ private MutableCounterLong regionIncrement;
+ private MutableCounterLong regionAppend;
+
+
+ public MetricsRegionSourceImpl(MetricsRegionWrapper regionWrapper,
+ MetricsRegionAggregateSourceImpl aggregate) {
+ this.regionWrapper = regionWrapper;
+ agg = aggregate;
+ agg.register(this);
+
+ LOG.debug("Creating new MetricsRegionSourceImpl for table " +
+ regionWrapper.getTableName() +
+ " " +
+ regionWrapper.getRegionName());
+
+ registry = agg.getMetricsRegistry();
+
+ regionNamePrefix = "table." + regionWrapper.getTableName() + "."
+ + "region." + regionWrapper.getRegionName() + ".";
+
+ String suffix = "Count";
+
+ regionPutKey = regionNamePrefix + MetricsRegionServerSource.PUT_KEY + suffix;
+ regionPut = registry.getLongCounter(regionPutKey, 0l);
+
+ regionDeleteKey = regionNamePrefix + MetricsRegionServerSource.DELETE_KEY + suffix;
+ regionDelete = registry.getLongCounter(regionDeleteKey, 0l);
+
+ regionGetKey = regionNamePrefix + MetricsRegionServerSource.GET_KEY + suffix;
+ regionGet = registry.getLongCounter(regionGetKey, 0l);
+
+ regionIncrementKey = regionNamePrefix + MetricsRegionServerSource.INCREMENT_KEY + suffix;
+ regionIncrement = registry.getLongCounter(regionIncrementKey, 0l);
+
+ regionAppendKey = regionNamePrefix + MetricsRegionServerSource.APPEND_KEY + suffix;
+ regionAppend = registry.getLongCounter(regionAppendKey, 0l);
+ }
+
+ @Override
+ public void close() {
+ closed = true;
+ agg.deregister(this);
+
+ LOG.trace("Removing region Metrics: " + regionWrapper.getRegionName());
+ registry.removeMetric(regionPutKey);
+ registry.removeMetric(regionDeleteKey);
+ registry.removeMetric(regionGetKey);
+ registry.removeMetric(regionIncrementKey);
+
+ registry.removeMetric(regionAppendKey);
+
+ JmxCacheBuster.clearJmxCache();
+ }
+
+ @Override
+ public void updatePut() {
+ regionPut.incr();
+ }
+
+ @Override
+ public void updateDelete() {
+ regionDelete.incr();
+ }
+
+ @Override
+ public void updateGet() {
+ regionGet.incr();
+ }
+
+ @Override
+ public void updateIncrement() {
+ regionIncrement.incr();
+ }
+
+ @Override
+ public void updateAppend() {
+ regionAppend.incr();
+ }
+
+ @Override
+ public MetricsRegionAggregateSource getAggregateSource() {
+ return agg;
+ }
+
+ @Override
+ public int compareTo(MetricsRegionSource source) {
+
+ if (!(source instanceof MetricsRegionSourceImpl))
+ return -1;
+
+ MetricsRegionSourceImpl impl = (MetricsRegionSourceImpl) source;
+ return this.regionWrapper.getRegionName()
+ .compareTo(impl.regionWrapper.getRegionName());
+ }
+
+ void snapshot(MetricsRecordBuilder mrb, boolean ignored) {
+ if (closed) return;
+
+ mrb.addGauge(
+ Interns.info(regionNamePrefix + MetricsRegionServerSource.STORE_COUNT,
+ MetricsRegionServerSource.STORE_COUNT_DESC),
+ this.regionWrapper.getNumStores());
+ mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT,
+ MetricsRegionServerSource.STOREFILE_COUNT_DESC),
+ this.regionWrapper.getNumStoreFiles());
+ mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE,
+ MetricsRegionServerSource.MEMSTORE_SIZE_DESC),
+ this.regionWrapper.getMemstoreSize());
+ mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE,
+ MetricsRegionServerSource.STOREFILE_SIZE_DESC),
+ this.regionWrapper.getStoreFileSize());
+ }
+}
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java
similarity index 75%
rename from hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java
rename to hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java
index 3f2a40dc32b..594d3271862 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java
@@ -16,23 +16,23 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.replication.regionserver.metrics;
+package org.apache.hadoop.hbase.replication.regionserver;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
/**
- * Hadoop2 implementation of ReplicationMetricsSource. This provides access to metrics gauges and
+ * Hadoop2 implementation of MetricsReplicationSource. This provides access to metrics gauges and
* counters.
*/
-public class ReplicationMetricsSourceImpl extends BaseMetricsSourceImpl implements
- ReplicationMetricsSource {
+public class MetricsReplicationSourceImpl extends BaseSourceImpl implements
+ MetricsReplicationSource {
- public ReplicationMetricsSourceImpl() {
+ public MetricsReplicationSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
}
- ReplicationMetricsSourceImpl(String metricsName,
+ MetricsReplicationSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java
similarity index 89%
rename from hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSourceImpl.java
rename to hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java
index a104d36c5a4..14e3cfdc906 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java
@@ -16,16 +16,16 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.rest.metrics;
+package org.apache.hadoop.hbase.rest;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
/**
* Hadoop Two implementation of a metrics2 source that will export metrics from the Rest server to
* the hadoop metrics2 subsystem.
*/
-public class RESTMetricsSourceImpl extends BaseMetricsSourceImpl implements RESTMetricsSource {
+public class MetricsRESTSourceImpl extends BaseSourceImpl implements MetricsRESTSource {
private MutableCounterLong request;
private MutableCounterLong sucGet;
@@ -35,11 +35,11 @@ public class RESTMetricsSourceImpl extends BaseMetricsSourceImpl implements REST
private MutableCounterLong fPut;
private MutableCounterLong fDel;
- public RESTMetricsSourceImpl() {
+ public MetricsRESTSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, CONTEXT, JMX_CONTEXT);
}
- public RESTMetricsSourceImpl(String metricsName,
+ public MetricsRESTSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {
@@ -92,6 +92,6 @@ public class RESTMetricsSourceImpl extends BaseMetricsSourceImpl implements REST
@Override
public void incrementFailedDeleteRequests(int inc) {
- fDel.incr(inc);
+ fDel.incr(inc);
}
}
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java
similarity index 71%
rename from hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactoryImpl.java
rename to hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java
index 718e4b0ebf7..b6015403745 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactoryImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java
@@ -16,12 +16,12 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.thrift.metrics;
+package org.apache.hadoop.hbase.thrift;
/**
- * Class used to create metrics sources for Thrift and Thrift2 servers.
+ * Class used to create metrics sources for Thrift and Thrift2 servers.
*/
-public class ThriftServerMetricsSourceFactoryImpl implements ThriftServerMetricsSourceFactory {
+public class MetricsThriftServerSourceFactoryImpl implements MetricsThriftServerSourceFactory {
/**
* A singleton used to make sure that only one thrift metrics source per server type is ever
@@ -29,23 +29,23 @@ public class ThriftServerMetricsSourceFactoryImpl implements ThriftServerMetrics
*/
private static enum FactoryStorage {
INSTANCE;
- ThriftServerMetricsSourceImpl thriftOne = new ThriftServerMetricsSourceImpl(METRICS_NAME,
+ MetricsThriftServerSourceImpl thriftOne = new MetricsThriftServerSourceImpl(METRICS_NAME,
METRICS_DESCRIPTION,
THRIFT_ONE_METRICS_CONTEXT,
THRIFT_ONE_JMX_CONTEXT);
- ThriftServerMetricsSourceImpl thriftTwo = new ThriftServerMetricsSourceImpl(METRICS_NAME,
+ MetricsThriftServerSourceImpl thriftTwo = new MetricsThriftServerSourceImpl(METRICS_NAME,
METRICS_DESCRIPTION,
THRIFT_TWO_METRICS_CONTEXT,
THRIFT_TWO_JMX_CONTEXT);
}
@Override
- public ThriftServerMetricsSource createThriftOneSource() {
+ public MetricsThriftServerSource createThriftOneSource() {
return FactoryStorage.INSTANCE.thriftOne;
}
@Override
- public ThriftServerMetricsSource createThriftTwoSource() {
+ public MetricsThriftServerSource createThriftTwoSource() {
return FactoryStorage.INSTANCE.thriftTwo;
}
}
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java
similarity index 85%
rename from hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceImpl.java
rename to hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java
index 5c9348fe4af..40a8a6c0e7f 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java
@@ -16,18 +16,17 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.thrift.metrics;
+package org.apache.hadoop.hbase.thrift;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
-import org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSource;
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
import org.apache.hadoop.metrics2.lib.MutableStat;
/**
- * Hadoop 2 version of ThriftServerMetricsSource{@link ThriftServerMetricsSource}
+ * Hadoop 2 version of MetricsThriftServerSource{@link org.apache.hadoop.hbase.thrift.MetricsThriftServerSource}
*/
-public class ThriftServerMetricsSourceImpl extends BaseMetricsSourceImpl implements
- ThriftServerMetricsSource {
+public class MetricsThriftServerSourceImpl extends BaseSourceImpl implements
+ MetricsThriftServerSource {
private MutableStat batchGetStat;
private MutableStat batchMutateStat;
@@ -38,7 +37,7 @@ public class ThriftServerMetricsSourceImpl extends BaseMetricsSourceImpl impleme
private MutableGaugeLong callQueueLenGauge;
- public ThriftServerMetricsSourceImpl(String metricsName,
+ public MetricsThriftServerSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {
@@ -50,12 +49,12 @@ public class ThriftServerMetricsSourceImpl extends BaseMetricsSourceImpl impleme
super.init();
batchGetStat = getMetricsRegistry().newStat(BATCH_GET_KEY, "", "Keys", "Ops");
batchMutateStat = getMetricsRegistry().newStat(BATCH_MUTATE_KEY, "", "Keys", "Ops");
- queueTimeStat = getMetricsRegistry().newRate(TIME_IN_QUEUE_KEY) ;
+ queueTimeStat = getMetricsRegistry().newRate(TIME_IN_QUEUE_KEY);
thriftCallStat = getMetricsRegistry().newRate(THRIFT_CALL_KEY);
thriftSlowCallStat = getMetricsRegistry().newRate(SLOW_THRIFT_CALL_KEY);
- callQueueLenGauge = getMetricsRegistry().getLongGauge(CALL_QUEUE_LEN_KEY, 0) ;
+ callQueueLenGauge = getMetricsRegistry().getLongGauge(CALL_QUEUE_LEN_KEY, 0);
}
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java
new file mode 100644
index 00000000000..ce5b9e2eec7
--- /dev/null
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.impl;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+
+/**
+ * JMX caches the beans that have been exported; even after the values are removed from hadoop's
+ * metrics system the keys and old values will still remain. This class stops and restarts the
+ * Hadoop metrics system, forcing JMX to clear the cache of exported metrics.
+ *
+ * This class need to be in the o.a.h.metrics2.impl namespace as many of the variables/calls used
+ * are package private.
+ */
+public class JmxCacheBuster {
+ private static final Log LOG = LogFactory.getLog(JmxCacheBuster.class);
+
+ /**
+ * For JMX to forget about all previously exported metrics.
+ */
+ public static void clearJmxCache() {
+ LOG.trace("Clearing JMX mbean cache.");
+
+ // This is pretty extreme but it's the best way that
+ // I could find to get metrics to be removed.
+ try {
+ if (DefaultMetricsSystem.instance() != null ) {
+ DefaultMetricsSystem.instance().stop();
+ DefaultMetricsSystem.instance().start();
+ }
+
+ } catch (Exception exception ) {
+ LOG.debug("error clearing the jmx it appears the metrics system hasn't been started", exception);
+ }
+ }
+}
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
index a4238931d81..080bd4d52f0 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
@@ -528,6 +528,7 @@ public class DynamicMetricsRegistry {
return returnExistingWithCast(metric, metricClass, name);
}
+ @SuppressWarnings("unchecked")
private T returnExistingWithCast(MutableMetric metric,
Class metricClass, String name) {
if (!metricClass.isAssignableFrom(metric.getClass())) {
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java
index 28e92c15251..766cf9609f7 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java
@@ -22,8 +22,8 @@ import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.metrics.MetricHistogram;
-import org.apache.hadoop.metrics.MetricsExecutor;
+import org.apache.hadoop.metrics2.MetricHistogram;
+import org.apache.hadoop.metrics2.MetricsExecutor;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.util.MetricQuantile;
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java
index 31357581891..d47912c273c 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.metrics2.lib;
-import org.apache.hadoop.metrics.MetricsExecutor;
+import org.apache.hadoop.metrics2.MetricsExecutor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java
index 4fb0be9bfc7..3b012e9001f 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java
@@ -22,7 +22,7 @@ import com.yammer.metrics.stats.ExponentiallyDecayingSample;
import com.yammer.metrics.stats.Sample;
import com.yammer.metrics.stats.Snapshot;
import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.metrics.MetricHistogram;
+import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory
new file mode 100644
index 00000000000..a5e43e4fcd2
--- /dev/null
+++ b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl
\ No newline at end of file
diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactory b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactory
deleted file mode 100644
index e81c3dcc43f..00000000000
--- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactory
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactoryImpl
diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory
new file mode 100644
index 00000000000..bc2f6430478
--- /dev/null
+++ b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl
diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource
new file mode 100644
index 00000000000..1e0dd200e6f
--- /dev/null
+++ b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl
\ No newline at end of file
diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource
deleted file mode 100644
index bb64ad5ba0d..00000000000
--- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSourceImpl
\ No newline at end of file
diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource
new file mode 100644
index 00000000000..5a4a8e9c044
--- /dev/null
+++ b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.rest.MetricsRESTSourceImpl
\ No newline at end of file
diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.metrics.RESTMetricsSource b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.metrics.RESTMetricsSource
deleted file mode 100644
index 9e7a28d7b9b..00000000000
--- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.metrics.RESTMetricsSource
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.hadoop.hbase.rest.metrics.RESTMetricsSourceImpl
\ No newline at end of file
diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory
new file mode 100644
index 00000000000..2b5c16338cd
--- /dev/null
+++ b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl
\ No newline at end of file
diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactory b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactory
deleted file mode 100644
index 62d1c6a9325..00000000000
--- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactory
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactoryImpl
\ No newline at end of file
diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor
new file mode 100644
index 00000000000..dc120525ba8
--- /dev/null
+++ b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor
@@ -0,0 +1 @@
+org.apache.hadoop.metrics2.lib.MetricsExecutorImpl
diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java
similarity index 60%
rename from hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceImpl.java
rename to hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java
index fe384d7f405..4cdd60677fe 100644
--- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceImpl.java
+++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java
@@ -16,26 +16,29 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.master.metrics;
+package org.apache.hadoop.hbase.master;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.master.MetricsMasterSource;
+import org.apache.hadoop.hbase.master.MetricsMasterSourceFactory;
+import org.apache.hadoop.hbase.master.MetricsMasterSourceImpl;
import org.junit.Test;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
/**
- * Test for MasterMetricsSourceImpl
+ * Test for MetricsMasterSourceImpl
*/
-public class TestMasterMetricsSourceImpl {
+public class TestMetricsMasterSourceImpl {
@Test
public void testGetInstance() throws Exception {
- MasterMetricsSourceFactory masterMetricsSourceFactory = CompatibilitySingletonFactory
- .getInstance(MasterMetricsSourceFactory.class);
- MasterMetricsSource masterMetricsSource = masterMetricsSourceFactory.create(null);
- assertTrue(masterMetricsSource instanceof MasterMetricsSourceImpl);
- assertSame(masterMetricsSourceFactory, CompatibilitySingletonFactory.getInstance(MasterMetricsSourceFactory.class));
+ MetricsMasterSourceFactory metricsMasterSourceFactory = CompatibilitySingletonFactory
+ .getInstance(MetricsMasterSourceFactory.class);
+ MetricsMasterSource masterSource = metricsMasterSourceFactory.create(null);
+ assertTrue(masterSource instanceof MetricsMasterSourceImpl);
+ assertSame(metricsMasterSourceFactory, CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class));
}
}
diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseMetricsSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java
similarity index 82%
rename from hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseMetricsSourceImpl.java
rename to hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java
index f3347029e1d..3c9d792409c 100644
--- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseMetricsSourceImpl.java
+++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java
@@ -27,15 +27,15 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
/**
- * Test of default BaseMetricsSource for hadoop 2
+ * Test of default BaseSource for hadoop 2
*/
-public class TestBaseMetricsSourceImpl {
+public class TestBaseSourceImpl {
- private static BaseMetricsSourceImpl bmsi;
+ private static BaseSourceImpl bmsi;
@BeforeClass
public static void setUp() throws Exception {
- bmsi = new BaseMetricsSourceImpl("TestName", "test description", "testcontext", "TestContext");
+ bmsi = new BaseSourceImpl("TestName", "test description", "testcontext", "TestContext");
}
@Test
@@ -75,16 +75,10 @@ public class TestBaseMetricsSourceImpl {
}
@Test
- public void testRemoveGauge() throws Exception {
+ public void testRemoveMetric() throws Exception {
bmsi.setGauge("testrmgauge", 100);
- bmsi.removeGauge("testrmgauge");
+ bmsi.removeMetric("testrmgauge");
assertNull(bmsi.metricsRegistry.get("testrmgauge"));
}
- @Test
- public void testRemoveCounter() throws Exception {
- bmsi.incCounters("testrmcounter", 100);
- bmsi.removeCounter("testrmcounter");
- assertNull(bmsi.metricsRegistry.get("testrmcounter"));
- }
}
diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java
new file mode 100644
index 00000000000..e6e16c78c64
--- /dev/null
+++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.junit.Test;
+
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test for MetricsRegionServerSourceImpl
+ */
+public class TestMetricsRegionServerSourceImpl {
+
+ @Test
+ public void testGetInstance() throws Exception {
+ MetricsRegionServerSourceFactory metricsRegionServerSourceFactory =
+ CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
+ MetricsRegionServerSource serverSource =
+ metricsRegionServerSourceFactory.createServer(null);
+ assertTrue(serverSource instanceof MetricsRegionServerSourceImpl);
+ assertSame(metricsRegionServerSourceFactory,
+ CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class));
+ }
+
+
+ @Test(expected = RuntimeException.class)
+ public void testNoGetRegionServerMetricsSourceImpl() throws Exception {
+ // This should throw an exception because MetricsRegionServerSourceImpl should only
+ // be created by a factory.
+ CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceImpl.class);
+ }
+}
diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
new file mode 100644
index 00000000000..89c0762e6d9
--- /dev/null
+++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class TestMetricsRegionSourceImpl {
+
+ @Test
+ public void testCompareTo() throws Exception {
+ MetricsRegionServerSourceFactory fact = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
+
+ MetricsRegionSource one = fact.createRegion(new RegionWrapperStub("TEST"));
+ MetricsRegionSource oneClone = fact.createRegion(new RegionWrapperStub("TEST"));
+ MetricsRegionSource two = fact.createRegion(new RegionWrapperStub("TWO"));
+
+ assertEquals(0, one.compareTo(oneClone));
+
+ assertTrue( one.compareTo(two) < 0);
+ assertTrue( two.compareTo(one) > 0);
+ }
+
+
+ @Test(expected = RuntimeException.class)
+ public void testNoGetRegionServerMetricsSourceImpl() throws Exception {
+ // This should throw an exception because MetricsRegionSourceImpl should only
+ // be created by a factory.
+ CompatibilitySingletonFactory.getInstance(MetricsRegionSource.class);
+ }
+
+ class RegionWrapperStub implements MetricsRegionWrapper {
+
+ private String regionName;
+
+ public RegionWrapperStub(String regionName) {
+
+
+ this.regionName = regionName;
+ }
+
+ @Override
+ public String getTableName() {
+ return null; //To change body of implemented methods use File | Settings | File Templates.
+ }
+
+ @Override
+ public String getRegionName() {
+ return this.regionName;
+ }
+
+ @Override
+ public long getNumStores() {
+ return 0; //To change body of implemented methods use File | Settings | File Templates.
+ }
+
+ @Override
+ public long getNumStoreFiles() {
+ return 0; //To change body of implemented methods use File | Settings | File Templates.
+ }
+
+ @Override
+ public long getMemstoreSize() {
+ return 0; //To change body of implemented methods use File | Settings | File Templates.
+ }
+
+ @Override
+ public long getStoreFileSize() {
+ return 0; //To change body of implemented methods use File | Settings | File Templates.
+ }
+
+ @Override
+ public long getReadRequestCount() {
+ return 0; //To change body of implemented methods use File | Settings | File Templates.
+ }
+
+ @Override
+ public long getWriteRequestCount() {
+ return 0; //To change body of implemented methods use File | Settings | File Templates.
+ }
+ }
+}
diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java
similarity index 66%
rename from hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceImpl.java
rename to hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java
index 04248e0c36b..bd7f3dde4e1 100644
--- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceImpl.java
+++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java
@@ -16,20 +16,22 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.replication.regionserver.metrics;
+package org.apache.hadoop.hbase.replication.regionserver;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
-/** Test for ReplicationMetricsSourceImpl */
-public class TestReplicationMetricsSourceImpl {
+/** Test for MetricsReplicationSourceImpl */
+public class TestMetricsReplicationSourceImpl {
@Test
public void testGetInstance() throws Exception {
- ReplicationMetricsSource rms = CompatibilitySingletonFactory
- .getInstance(ReplicationMetricsSource.class);
- assertTrue(rms instanceof ReplicationMetricsSourceImpl);
+ MetricsReplicationSource rms = CompatibilitySingletonFactory
+ .getInstance(MetricsReplicationSource.class);
+ assertTrue(rms instanceof MetricsReplicationSourceImpl);
}
}
diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java
similarity index 70%
rename from hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSourceImpl.java
rename to hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java
index cc9c82d6a81..5f4e70baf1a 100644
--- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSourceImpl.java
+++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java
@@ -16,23 +16,25 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.rest.metrics;
+package org.apache.hadoop.hbase.rest;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.rest.MetricsRESTSource;
+import org.apache.hadoop.hbase.rest.MetricsRESTSourceImpl;
import org.junit.Test;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
/**
- * Test for hadoop 2's version of RESTMetricsSource
+ * Test for hadoop 2's version of MetricsRESTSource
*/
-public class TestRESTMetricsSourceImpl {
+public class TestMetricsRESTSourceImpl {
@Test
public void ensureCompatRegistered() throws Exception {
- assertNotNull(CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class));
- assertTrue(CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class) instanceof RESTMetricsSourceImpl);
+ assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class));
+ assertTrue(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class) instanceof MetricsRESTSourceImpl);
}
}
diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java
index b8b06ab4504..29c74de9259 100644
--- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java
+++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.hbase.test;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSource;
-import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl;
+import org.apache.hadoop.hbase.metrics.BaseSource;
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsInfo;
@@ -129,68 +129,68 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
}
@Override
- public void assertTag(String name, String expected, BaseMetricsSource source) {
+ public void assertTag(String name, String expected, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertEquals("Tags should be equal", expected, tags.get(cName));
}
@Override
- public void assertGauge(String name, long expected, BaseMetricsSource source) {
+ public void assertGauge(String name, long expected, BaseSource source) {
long found = getGaugeLong(name, source);
assertEquals("Metrics Should be equal", (long) Long.valueOf(expected), found);
}
@Override
- public void assertGaugeGt(String name, long expected, BaseMetricsSource source) {
+ public void assertGaugeGt(String name, long expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + " (" + found + ") should be greater than " + expected, found > expected);
}
@Override
- public void assertGaugeLt(String name, long expected, BaseMetricsSource source) {
+ public void assertGaugeLt(String name, long expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
- public void assertGauge(String name, double expected, BaseMetricsSource source) {
+ public void assertGauge(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
- assertEquals("Metrics Should be equal", (double) Double.valueOf(expected), found);
+ assertEquals("Metrics Should be equal", (double) Double.valueOf(expected), found, 0.01);
}
@Override
- public void assertGaugeGt(String name, double expected, BaseMetricsSource source) {
+ public void assertGaugeGt(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be greater than " + expected, found > expected);
}
@Override
- public void assertGaugeLt(String name, double expected, BaseMetricsSource source) {
+ public void assertGaugeLt(String name, double expected, BaseSource source) {
double found = getGaugeDouble(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
- public void assertCounter(String name, long expected, BaseMetricsSource source) {
+ public void assertCounter(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertEquals("Metrics Counters should be equal", (long) Long.valueOf(expected), found);
}
@Override
- public void assertCounterGt(String name, long expected, BaseMetricsSource source) {
+ public void assertCounterGt(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertTrue(name + " (" + found + ") should be greater than " + expected, found > expected);
}
@Override
- public void assertCounterLt(String name, long expected, BaseMetricsSource source) {
+ public void assertCounterLt(String name, long expected, BaseSource source) {
long found = getCounter(name, source);
assertTrue(name + "(" + found + ") should be less than " + expected, found < expected);
}
@Override
- public long getCounter(String name, BaseMetricsSource source) {
+ public long getCounter(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull(counters.get(cName));
@@ -198,7 +198,7 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
}
@Override
- public double getGaugeDouble(String name, BaseMetricsSource source) {
+ public double getGaugeDouble(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull(gauges.get(cName));
@@ -206,7 +206,7 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
}
@Override
- public long getGaugeLong(String name, BaseMetricsSource source) {
+ public long getGaugeLong(String name, BaseSource source) {
getMetrics(source);
String cName = canonicalizeMetricName(name);
assertNotNull(gauges.get(cName));
@@ -220,12 +220,12 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper {
counters.clear();
}
- private void getMetrics(BaseMetricsSource source) {
+ private void getMetrics(BaseSource source) {
reset();
- if (!(source instanceof BaseMetricsSourceImpl)) {
+ if (!(source instanceof BaseSourceImpl)) {
assertTrue(false);
}
- BaseMetricsSourceImpl impl = (BaseMetricsSourceImpl) source;
+ BaseSourceImpl impl = (BaseSourceImpl) source;
impl.getMetrics(new MockMetricsBuilder(), true);
diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactoryImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java
similarity index 64%
rename from hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactoryImpl.java
rename to hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java
index c66c36d038a..c9eda58d9ea 100644
--- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactoryImpl.java
+++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java
@@ -16,9 +16,11 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.thrift.metrics;
+package org.apache.hadoop.hbase.thrift;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory;
+import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl;
import org.junit.Test;
import static org.junit.Assert.assertNotNull;
@@ -26,28 +28,28 @@ import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
/**
- * Test for hadoop 2's version of ThriftServerMetricsSourceFactory
+ * Test for hadoop 2's version of MetricsThriftServerSourceFactory
*/
-public class TestThriftServerMetricsSourceFactoryImpl {
+public class TestMetricsThriftServerSourceFactoryImpl {
@Test
public void testCompatabilityRegistered() throws Exception {
- assertNotNull(CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class));
- assertTrue(CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class) instanceof ThriftServerMetricsSourceFactoryImpl);
+ assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class));
+ assertTrue(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class) instanceof MetricsThriftServerSourceFactoryImpl);
}
@Test
public void testCreateThriftOneSource() throws Exception {
//Make sure that the factory gives back a singleton.
- assertSame(new ThriftServerMetricsSourceFactoryImpl().createThriftOneSource(),
- new ThriftServerMetricsSourceFactoryImpl().createThriftOneSource());
+ assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftOneSource(),
+ new MetricsThriftServerSourceFactoryImpl().createThriftOneSource());
}
@Test
public void testCreateThriftTwoSource() throws Exception {
//Make sure that the factory gives back a singleton.
- assertSame(new ThriftServerMetricsSourceFactoryImpl().createThriftTwoSource(),
- new ThriftServerMetricsSourceFactoryImpl().createThriftTwoSource());
+ assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource(),
+ new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource());
}
}
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
index ee66fdd9231..df20f3cd273 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
@@ -24,7 +24,6 @@ String format = "html";
<%import>
java.util.*;
org.apache.hadoop.hbase.regionserver.HRegionServer;
-org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
org.apache.hadoop.hbase.util.Bytes;
org.apache.hadoop.hbase.HRegionInfo;
org.apache.hadoop.hbase.ServerName;
@@ -38,7 +37,6 @@ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
<%java return; %>
%if>
<%java>
- RegionServerMetrics metrics = regionServer.getMetrics();
ServerInfo serverInfo = ProtobufUtil.getServerInfo(regionServer);
ServerName serverName = ProtobufUtil.toServerName(serverInfo.getServerName());
List onlineRegions = ProtobufUtil.getOnlineRegions(regionServer);
@@ -98,7 +96,7 @@ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
Server Metrics
- <& ServerMetricsTmpl; metrics = metrics; &>
+ <& ServerMetricsTmpl; mWrap = regionServer.getMetrics().getRegionServerWrapper(); &>
<& ../common/TaskMonitorTmpl; filter = filter &>
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
index ac0fe6f753c..997793984e2 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
@@ -23,7 +23,6 @@
<%import>
java.util.*;
org.apache.hadoop.hbase.regionserver.HRegionServer;
- org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
org.apache.hadoop.hbase.util.Bytes;
org.apache.hadoop.hbase.HRegionInfo;
org.apache.hadoop.hbase.ServerName;
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
index 0478c1592c9..0f1c5f43d95 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
@@ -17,12 +17,12 @@ See the License for the specific language governing permissions and
limitations under the License.
%doc>
<%args>
- RegionServerMetrics metrics;
+MetricsRegionServerWrapper mWrap;
%args>
<%import>
java.util.*;
org.apache.hadoop.hbase.regionserver.HRegionServer;
-org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
+org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapper;
org.apache.hadoop.hbase.util.Bytes;
org.apache.hadoop.hbase.HRegionInfo;
org.apache.hadoop.hbase.ServerName;
@@ -42,36 +42,32 @@ java.lang.management.ManagementFactory;
-%def>
+%def>
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
index 8b6e4dc29ab..26ce6d12b6f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
@@ -30,15 +30,13 @@ import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
import org.apache.hadoop.io.RawComparator;
/**
* Common functionality needed by all versions of {@link HFile} readers.
*/
@InterfaceAudience.Private
-public abstract class AbstractHFileReader extends SchemaConfigured
- implements HFile.Reader {
+public abstract class AbstractHFileReader implements HFile.Reader {
/** Filesystem-level block reader for this HFile format version. */
protected HFileBlock.FSReader fsBlockReader;
@@ -119,7 +117,6 @@ public abstract class AbstractHFileReader extends SchemaConfigured
final long fileSize,
final boolean closeIStream,
final CacheConfig cacheConf, final HFileSystem hfs) {
- super(null, path);
this.trailer = trailer;
this.compressAlgo = trailer.getCompressionCodec();
this.cacheConf = cacheConf;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
index 1ce4683fb3a..000e11a9b0e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.io.RawComparator;
@@ -44,8 +43,7 @@ import org.apache.hadoop.io.Writable;
* Common functionality needed by all versions of {@link HFile} writers.
*/
@InterfaceAudience.Private
-public abstract class AbstractHFileWriter extends SchemaConfigured
- implements HFile.Writer {
+public abstract class AbstractHFileWriter implements HFile.Writer {
/** Key previously appended. Becomes the last key in the file. */
protected byte[] lastKeyBuffer = null;
@@ -116,7 +114,6 @@ public abstract class AbstractHFileWriter extends SchemaConfigured
Compression.Algorithm compressAlgo,
HFileDataBlockEncoder dataBlockEncoder,
KeyComparator comparator) {
- super(null, path);
this.outputStream = outputStream;
this.path = path;
this.name = path != null ? path.getName() : outputStream.toString();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java
index c6b12ebc431..b205106b401 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java
@@ -23,7 +23,6 @@ import java.nio.ByteBuffer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
/**
* Cacheable is an interface that allows for an object to be cached. If using an
@@ -57,14 +56,4 @@ public interface Cacheable extends HeapSize {
*/
public CacheableDeserializer getDeserializer();
- /**
- * @return the block type of this cached HFile block
- */
- public BlockType getBlockType();
-
- /**
- * @return the metrics object identified by table and column family
- */
- public SchemaMetrics getSchemaMetrics();
-
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index a642f012f3e..376dc23ce13 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -49,8 +49,6 @@ import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.HbaseMapWritable;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.SchemaAware;
import org.apache.hadoop.hbase.util.BloomFilterWriter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
@@ -284,8 +282,6 @@ public class HFile {
/** @return the path to this {@link HFile} */
Path getPath();
- String getColumnFamilyName();
-
void appendMetaBlock(String bloomFilterMetaKey, Writable metaWriter);
/**
@@ -431,7 +427,6 @@ public class HFile {
*/
public static final WriterFactory getWriterFactory(Configuration conf,
CacheConfig cacheConf) {
- SchemaMetrics.configureGlobally(conf);
int version = getFormatVersion(conf);
switch (version) {
case 1:
@@ -453,8 +448,7 @@ public class HFile {
}
/** An interface used by clients to open and iterate an {@link HFile}. */
- public interface Reader extends Closeable, CachingBlockReader,
- SchemaAware {
+ public interface Reader extends Closeable, CachingBlockReader {
/**
* Returns this reader's "name". Usually the last component of the path.
* Needs to be constant as the file is being moved to support caching on
@@ -462,8 +456,6 @@ public class HFile {
*/
String getName();
- String getColumnFamilyName();
-
RawComparator getComparator();
HFileScanner getScanner(boolean cacheBlocks,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index d6b65a1ac26..7a07b980428 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
import org.apache.hadoop.hbase.regionserver.MemStore;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.ClassSize;
@@ -85,7 +84,7 @@ import com.google.common.base.Preconditions;
* except that the data section is always uncompressed in the cache.
*/
@InterfaceAudience.Private
-public class HFileBlock extends SchemaConfigured implements Cacheable {
+public class HFileBlock implements Cacheable {
/** Minor versions starting with this number have hbase checksums */
static final int MINOR_VERSION_WITH_CHECKSUM = 1;
@@ -539,8 +538,7 @@ public class HFileBlock extends SchemaConfigured implements Cacheable {
@Override
public long heapSize() {
long size = ClassSize.align(
- // Base class size, including object overhead.
- SCHEMA_CONFIGURED_UNALIGNED_HEAP_SIZE +
+ ClassSize.OBJECT +
// Block type and byte buffer references
2 * ClassSize.REFERENCE +
// On-disk size, uncompressed size, and next block's on-disk size
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index 7fbc06da71b..c7cf8742e64 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFile.CachingBlockReader;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.CompoundBloomFilterWriter;
@@ -719,8 +718,7 @@ public class HFileBlockIndex {
* index. However, in most practical cases we will only have leaf-level
* blocks and the root index, or just the root index.
*/
- public static class BlockIndexWriter extends SchemaConfigured
- implements InlineBlockWriter {
+ public static class BlockIndexWriter implements InlineBlockWriter {
/**
* While the index is being written, this represents the current block
* index referencing all leaf blocks, with one exception. If the file is
@@ -954,7 +952,6 @@ public class HFileBlockIndex {
if (blockCache != null) {
HFileBlock blockForCaching = blockWriter.getBlockForCaching();
- passSchemaMetricsTo(blockForCaching);
blockCache.cacheBlock(new BlockCacheKey(nameForCaching,
beginOffset, DataBlockEncoding.NONE,
blockForCaching.getBlockType()), blockForCaching);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java
index b83ef39a496..eb50a3cc961 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java
@@ -243,7 +243,6 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder {
includesMemstoreTS, block.getMinorVersion(),
block.getBytesPerChecksum(), block.getChecksumType(),
block.getOnDiskDataSizeWithHeader());
- block.passSchemaMetricsTo(encodedBlock);
return encodedBlock;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
index e1882b380c8..be0fb17f731 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.BloomFilter;
import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.apache.hadoop.hbase.util.ByteBloomFilter;
@@ -174,7 +173,6 @@ public class HFilePrettyPrinter {
conf.get(org.apache.hadoop.hbase.HConstants.HBASE_DIR));
conf.set("fs.default.name",
conf.get(org.apache.hadoop.hbase.HConstants.HBASE_DIR));
- SchemaMetrics.configureGlobally(conf);
try {
if (!parseOptions(args))
return 1;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java
index 56339da36bd..436d0c4f1d1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.RawComparator;
@@ -235,8 +234,6 @@ public class HFileReaderV1 extends AbstractHFileReader {
cacheConf.shouldCacheBlockOnRead(effectiveCategory));
if (cachedBlock != null) {
cacheHits.incrementAndGet();
- getSchemaMetrics().updateOnCacheHit(effectiveCategory,
- SchemaMetrics.NO_COMPACTION);
return cachedBlock.getBufferWithoutHeader();
}
// Cache Miss, please load.
@@ -245,13 +242,10 @@ public class HFileReaderV1 extends AbstractHFileReader {
HFileBlock hfileBlock = fsBlockReader.readBlockData(offset,
nextOffset - offset, metaBlockIndexReader.getRootBlockDataSize(block),
true);
- passSchemaMetricsTo(hfileBlock);
hfileBlock.expectType(BlockType.META);
final long delta = System.nanoTime() - startTimeNs;
HFile.offerReadLatency(delta, true);
- getSchemaMetrics().updateOnCacheMiss(effectiveCategory,
- SchemaMetrics.NO_COMPACTION, delta);
// Cache the block
if (cacheBlock && cacheConf.shouldCacheBlockOnRead(effectiveCategory)) {
@@ -300,8 +294,6 @@ public class HFileReaderV1 extends AbstractHFileReader {
cacheConf.shouldCacheDataOnRead());
if (cachedBlock != null) {
cacheHits.incrementAndGet();
- getSchemaMetrics().updateOnCacheHit(
- cachedBlock.getBlockType().getCategory(), isCompaction);
return cachedBlock.getBufferWithoutHeader();
}
// Carry on, please load.
@@ -323,13 +315,10 @@ public class HFileReaderV1 extends AbstractHFileReader {
HFileBlock hfileBlock = fsBlockReader.readBlockData(offset, nextOffset
- offset, dataBlockIndexReader.getRootBlockDataSize(block), pread);
- passSchemaMetricsTo(hfileBlock);
hfileBlock.expectType(BlockType.DATA);
final long delta = System.nanoTime() - startTimeNs;
HFile.offerReadLatency(delta, pread);
- getSchemaMetrics().updateOnCacheMiss(BlockCategory.DATA, isCompaction,
- delta);
// Cache the block
if (cacheBlock && cacheConf.shouldCacheBlockOnRead(
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
index e252f38a3d7..71e4d09cb6c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
@@ -226,7 +226,6 @@ public class HFileReaderV2 extends AbstractHFileReader {
// Return a distinct 'shallow copy' of the block,
// so pos does not get messed by the scanner
cacheHits.incrementAndGet();
- getSchemaMetrics().updateOnCacheHit(BlockCategory.META, false);
return cachedBlock.getBufferWithoutHeader();
}
// Cache Miss, please load.
@@ -234,11 +233,9 @@ public class HFileReaderV2 extends AbstractHFileReader {
HFileBlock metaBlock = fsBlockReader.readBlockData(metaBlockOffset,
blockSize, -1, true);
- passSchemaMetricsTo(metaBlock);
final long delta = System.nanoTime() - startTimeNs;
HFile.offerReadLatency(delta, true);
- getSchemaMetrics().updateOnCacheMiss(BlockCategory.META, false, delta);
// Cache the block
if (cacheBlock) {
@@ -302,7 +299,6 @@ public class HFileReaderV2 extends AbstractHFileReader {
cachedBlock.getBlockType().getCategory();
cacheHits.incrementAndGet();
- getSchemaMetrics().updateOnCacheHit(blockCategory, isCompaction);
if (cachedBlock.getBlockType() == BlockType.DATA) {
HFile.dataBlockReadCnt.incrementAndGet();
@@ -331,12 +327,10 @@ public class HFileReaderV2 extends AbstractHFileReader {
hfileBlock = dataBlockEncoder.diskToCacheFormat(hfileBlock,
isCompaction);
validateBlockType(hfileBlock, expectedBlockType);
- passSchemaMetricsTo(hfileBlock);
BlockCategory blockCategory = hfileBlock.getBlockType().getCategory();
final long delta = System.nanoTime() - startTimeNs;
HFile.offerReadLatency(delta, pread);
- getSchemaMetrics().updateOnCacheMiss(blockCategory, isCompaction, delta);
// Cache the block if necessary
if (cacheBlock && cacheConf.shouldCacheBlockOnRead(
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java
index 07a58681ec7..d272255bc21 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
import org.apache.hadoop.hbase.regionserver.MemStore;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.BloomFilterWriter;
import org.apache.hadoop.hbase.util.Bytes;
@@ -109,7 +108,6 @@ public class HFileWriterV1 extends AbstractHFileWriter {
final KeyComparator comparator) throws IOException {
super(cacheConf, ostream == null ? createOutputStream(conf, fs, path) : ostream, path,
blockSize, compress, blockEncoder, comparator);
- SchemaMetrics.configureGlobally(conf);
}
/**
@@ -158,7 +156,6 @@ public class HFileWriterV1 extends AbstractHFileWriter {
HFileBlock.HEADER_SIZE_NO_CHECKSUM); // onDiskDataSizeWithHeader
block = blockEncoder.diskToCacheFormat(block, false);
- passSchemaMetricsTo(block);
cacheConf.getBlockCache().cacheBlock(
new BlockCacheKey(name, blockBegin, DataBlockEncoding.NONE,
block.getBlockType()), block);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
index 1b05138e119..2d7b0d8a074 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.BloomFilterWriter;
import org.apache.hadoop.hbase.util.Bytes;
@@ -114,7 +113,6 @@ public class HFileWriterV2 extends AbstractHFileWriter {
super(cacheConf,
ostream == null ? createOutputStream(conf, fs, path) : ostream,
path, blockSize, compressAlgo, blockEncoder, comparator);
- SchemaMetrics.configureGlobally(conf);
this.checksumType = checksumType;
this.bytesPerChecksum = bytesPerChecksum;
finishInit(conf);
@@ -141,16 +139,6 @@ public class HFileWriterV2 extends AbstractHFileWriter {
// Meta data block index writer
metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter();
LOG.debug("Initialized with " + cacheConf);
-
- if (isSchemaConfigured()) {
- schemaConfigurationChanged();
- }
- }
-
- @Override
- protected void schemaConfigurationChanged() {
- passSchemaMetricsTo(dataBlockIndexWriter);
- passSchemaMetricsTo(metaBlockIndexWriter);
}
/**
@@ -227,7 +215,6 @@ public class HFileWriterV2 extends AbstractHFileWriter {
final boolean isCompaction = false;
HFileBlock cacheFormatBlock = blockEncoder.diskToCacheFormat(
fsBlockWriter.getBlockForCaching(), isCompaction);
- passSchemaMetricsTo(cacheFormatBlock);
cacheConf.getBlockCache().cacheBlock(
new BlockCacheKey(name, offset, blockEncoder.getEncodingInCache(),
cacheFormatBlock.getBlockType()), cacheFormatBlock);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index 8a6929baf56..74b6212df4c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -326,12 +325,6 @@ public class LruBlockCache implements BlockCache, HeapSize {
if (evict) {
heapsize *= -1;
}
- Cacheable cachedBlock = cb.getBuffer();
- SchemaMetrics schemaMetrics = cachedBlock.getSchemaMetrics();
- if (schemaMetrics != null) {
- schemaMetrics.updateOnCachePutOrEvict(
- cachedBlock.getBlockType().getCategory(), heapsize, evict);
- }
return size.addAndGet(heapsize);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 76da09f53ab..12668e9edb0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -65,7 +65,6 @@ import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler;
import org.apache.hadoop.hbase.master.handler.SplitRegionHandler;
-import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
@@ -152,7 +151,7 @@ public class AssignmentManager extends ZooKeeperListener {
EventType.RS_ZK_REGION_FAILED_OPEN, EventType.RS_ZK_REGION_CLOSED });
// metrics instance to send metrics for RITs
- MasterMetrics masterMetrics;
+ MetricsMaster metricsMaster;
private final RegionStates regionStates;
@@ -176,7 +175,7 @@ public class AssignmentManager extends ZooKeeperListener {
*/
public AssignmentManager(Server server, ServerManager serverManager,
CatalogTracker catalogTracker, final LoadBalancer balancer,
- final ExecutorService service, MasterMetrics metrics) throws KeeperException, IOException {
+ final ExecutorService service, MetricsMaster metricsMaster) throws KeeperException, IOException {
super(server.getZooKeeper());
this.server = server;
this.serverManager = serverManager;
@@ -200,7 +199,7 @@ public class AssignmentManager extends ZooKeeperListener {
int maxThreads = conf.getInt("hbase.assignment.threads.max", 30);
this.threadPoolExecutorService = Threads.getBoundedCachedThreadPool(
maxThreads, 60L, TimeUnit.SECONDS, Threads.newDaemonThreadFactory("hbase-am"));
- this.masterMetrics = metrics;// can be null only with tests.
+ this.metricsMaster = metricsMaster;// can be null only with tests.
this.regionStates = new RegionStates(server, serverManager);
int workers = conf.getInt("hbase.assignment.zkevent.workers", 5);
@@ -2343,10 +2342,10 @@ public class AssignmentManager extends ZooKeeperListener {
oldestRITTime = ritTime;
}
}
- if (this.masterMetrics != null) {
- this.masterMetrics.updateRITOldestAge(oldestRITTime);
- this.masterMetrics.updateRITCount(totalRITs);
- this.masterMetrics.updateRITCountOverThreshold(totalRITsOverThreshold);
+ if (this.metricsMaster != null) {
+ this.metricsMaster.updateRITOldestAge(oldestRITTime);
+ this.metricsMaster.updateRITCount(totalRITs);
+ this.metricsMaster.updateRITCountOverThreshold(totalRITsOverThreshold);
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index a28d64a2a61..758dd3013fd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.Chore;
+import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.DeserializationException;
import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -96,8 +97,6 @@ import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler;
import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler;
import org.apache.hadoop.hbase.master.handler.TableEventHandler;
import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
-import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
-import org.apache.hadoop.hbase.master.metrics.MasterMetricsWrapperImpl;
import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
@@ -247,7 +246,7 @@ Server {
private final InetSocketAddress isa;
// Metrics for the HMaster
- private final MasterMetrics metrics;
+ private final MetricsMaster metricsMaster;
// file system manager for the master FS operations
private MasterFileSystem fileSystemManager;
@@ -383,7 +382,7 @@ Server {
//should we check the compression codec type at master side, default true, HBASE-6370
this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);
- this.metrics = new MasterMetrics( new MasterMetricsWrapperImpl(this));
+ this.metricsMaster = new MetricsMaster( new MetricsMasterWrapperImpl(this));
}
/**
@@ -413,8 +412,8 @@ Server {
}
- MasterMetrics getMetrics() {
- return metrics;
+ MetricsMaster getMetrics() {
+ return metricsMaster;
}
/**
@@ -523,7 +522,7 @@ Server {
this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
this.loadBalancerTracker.start();
this.assignmentManager = new AssignmentManager(this, serverManager,
- this.catalogTracker, this.balancer, this.executorService, this.metrics);
+ this.catalogTracker, this.balancer, this.executorService, this.metricsMaster);
zooKeeper.registerListenerFirst(assignmentManager);
this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
@@ -627,7 +626,7 @@ Server {
status.setStatus("Initializing Master file system");
this.masterActiveTime = System.currentTimeMillis();
// TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
- this.fileSystemManager = new MasterFileSystem(this, this, metrics, masterRecovery);
+ this.fileSystemManager = new MasterFileSystem(this, this, metricsMaster, masterRecovery);
this.tableDescriptors =
new FSTableDescriptors(this.fileSystemManager.getFileSystem(),
@@ -1182,9 +1181,9 @@ Server {
try {
HBaseProtos.ServerLoad sl = request.getLoad();
this.serverManager.regionServerReport(ProtobufUtil.toServerName(request.getServer()), new ServerLoad(sl));
- if (sl != null && this.metrics != null) {
+ if (sl != null && this.metricsMaster != null) {
// Up our metrics.
- this.metrics.incrementRequests(sl.getTotalNumberOfRequests());
+ this.metricsMaster.incrementRequests(sl.getTotalNumberOfRequests());
}
} catch (IOException ioe) {
throw new ServiceException(ioe);
@@ -1834,7 +1833,14 @@ Server {
}
public String getClusterId() {
- return fileSystemManager.getClusterId().toString();
+ if (fileSystemManager == null) {
+ return "";
+ }
+ ClusterId id = fileSystemManager.getClusterId();
+ if (id == null) {
+ return "";
+ }
+ return id.toString();
}
/**
@@ -2232,7 +2238,15 @@ Server {
* @return the average load
*/
public double getAverageLoad() {
- return this.assignmentManager.getRegionStates().getAverageLoad();
+ if (this.assignmentManager == null) {
+ return 0;
+ }
+
+ RegionStates regionStates = this.assignmentManager.getRegionStates();
+ if (regionStates == null) {
+ return 0;
+ }
+ return regionStates.getAverageLoad();
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 8a1bc465efe..48714137f95 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -44,10 +44,8 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.fs.HFileSystem;
-import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.regionserver.wal.OrphanHLogAfterSplitException;
@@ -69,7 +67,7 @@ public class MasterFileSystem {
// master status
Server master;
// metrics for master
- MasterMetrics metrics;
+ MetricsMaster metricsMaster;
// Persisted unique cluster ID
private ClusterId clusterId;
// Keep around for convenience.
@@ -87,12 +85,12 @@ public class MasterFileSystem {
private final MasterServices services;
public MasterFileSystem(Server master, MasterServices services,
- MasterMetrics metrics, boolean masterRecovery)
+ MetricsMaster metricsMaster, boolean masterRecovery)
throws IOException {
this.conf = master.getConfiguration();
this.master = master;
this.services = services;
- this.metrics = metrics;
+ this.metricsMaster = metricsMaster;
// Set filesystem to be that of this.rootdir else we get complaints about
// mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
// default localfs. Presumption is that rootdir is fully-qualified before
@@ -317,8 +315,8 @@ public class MasterFileSystem {
}
}
- if (this.metrics != null) {
- this.metrics.addSplit(splitTime, splitLogSize);
+ if (this.metricsMaster != null) {
+ this.metricsMaster.addSplit(splitTime, splitLogSize);
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java
similarity index 74%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java
index 603d3e93e78..578bca4a242 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java
@@ -15,13 +15,16 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hbase.master.metrics;
+package org.apache.hadoop.hbase.master;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.master.MetricsMasterSource;
+import org.apache.hadoop.hbase.master.MetricsMasterSourceFactory;
+import org.apache.hadoop.hbase.master.MetricsMasterWrapper;
/**
* This class is for maintaining the various master statistics
@@ -32,17 +35,17 @@ import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
*/
@InterfaceStability.Evolving
@InterfaceAudience.Private
-public class MasterMetrics {
+public class MetricsMaster {
private final Log LOG = LogFactory.getLog(this.getClass());
- private MasterMetricsSource masterMetricsSource;
+ private MetricsMasterSource masterSource;
- public MasterMetrics(MasterMetricsWrapper masterWrapper) {
- masterMetricsSource = CompatibilitySingletonFactory.getInstance(MasterMetricsSourceFactory.class).create(masterWrapper);
+ public MetricsMaster(MetricsMasterWrapper masterWrapper) {
+ masterSource = CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class).create(masterWrapper);
}
// for unit-test usage
- public MasterMetricsSource getMetricsSource() {
- return masterMetricsSource;
+ public MetricsMasterSource getMetricsSource() {
+ return masterSource;
}
/**
@@ -51,15 +54,15 @@ public class MasterMetrics {
* @param size length of original HLogs that were split
*/
public synchronized void addSplit(long time, long size) {
- masterMetricsSource.updateSplitTime(time);
- masterMetricsSource.updateSplitSize(size);
+ masterSource.updateSplitTime(time);
+ masterSource.updateSplitSize(size);
}
/**
* @param inc How much to add to requests.
*/
public void incrementRequests(final int inc) {
- masterMetricsSource.incRequests(inc);
+ masterSource.incRequests(inc);
}
@@ -68,7 +71,7 @@ public class MasterMetrics {
* @param ritCount
*/
public void updateRITCount(int ritCount) {
- masterMetricsSource.setRIT(ritCount);
+ masterSource.setRIT(ritCount);
}
/**
@@ -77,13 +80,13 @@ public class MasterMetrics {
* @param ritCountOverThreshold
*/
public void updateRITCountOverThreshold(int ritCountOverThreshold) {
- masterMetricsSource.setRITCountOverThreshold(ritCountOverThreshold);
+ masterSource.setRITCountOverThreshold(ritCountOverThreshold);
}
/**
* update the timestamp for oldest region in transition metrics.
* @param timestamp
*/
public void updateRITOldestAge(long timestamp) {
- masterMetricsSource.setRITOldestAge(timestamp);
+ masterSource.setRITOldestAge(timestamp);
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java
similarity index 62%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsWrapperImpl.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java
index 3a589869b09..dec2dd0e0d9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java
@@ -15,18 +15,21 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hbase.master.metrics;
+package org.apache.hadoop.hbase.master;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.MetricsMasterWrapper;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
/**
* Impl for exposing HMaster Information through JMX
*/
-public class MasterMetricsWrapperImpl implements MasterMetricsWrapper {
+public class MetricsMasterWrapperImpl implements MetricsMasterWrapper {
private final HMaster master;
- public MasterMetricsWrapperImpl(final HMaster master) {
+ public MetricsMasterWrapperImpl(final HMaster master) {
this.master = master;
}
@@ -42,7 +45,11 @@ public class MasterMetricsWrapperImpl implements MasterMetricsWrapper {
@Override
public String getZookeeperQuorum() {
- return master.getZooKeeperWatcher().getQuorum();
+ ZooKeeperWatcher zk = master.getZooKeeperWatcher();
+ if (zk == null) {
+ return "";
+ }
+ return zk.getQuorum();
}
@Override
@@ -51,28 +58,40 @@ public class MasterMetricsWrapperImpl implements MasterMetricsWrapper {
}
@Override
- public long getMasterStartTime() {
+ public long getStartTime() {
return master.getMasterStartTime();
}
@Override
- public long getMasterActiveTime() {
+ public long getActiveTime() {
return master.getMasterActiveTime();
}
@Override
public int getRegionServers() {
- return this.master.getServerManager().getOnlineServers().size();
+ ServerManager serverManager = this.master.getServerManager();
+ if (serverManager == null) {
+ return 0;
+ }
+ return serverManager.getOnlineServers().size();
}
@Override
public int getDeadRegionServers() {
- return master.getServerManager().getDeadServers().size();
+ ServerManager serverManager = this.master.getServerManager();
+ if (serverManager == null) {
+ return 0;
+ }
+ return serverManager.getDeadServers().size();
}
@Override
public String getServerName() {
- return master.getServerName().getServerName();
+ ServerName serverName = master.getServerName();
+ if (serverName == null) {
+ return "";
+ }
+ return serverName.getServerName();
}
@Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 0169315d052..cbb444d53b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -81,7 +81,6 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.NotServingRegionException;
@@ -116,11 +115,7 @@ import org.apache.hadoop.hbase.ipc.HBaseServer;
import org.apache.hadoop.hbase.ipc.RpcCallContext;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.regionserver.metrics.OperationMetrics;
-import org.apache.hadoop.hbase.regionserver.metrics.RegionMetricsStorage;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
@@ -235,16 +230,21 @@ public class HRegion implements HeapSize { // , Writable{
// private int [] storeSize = null;
// private byte [] name = null;
- final AtomicLong memstoreSize = new AtomicLong(0);
+ public final AtomicLong memstoreSize = new AtomicLong(0);
// Debug possible data loss due to WAL off
- final AtomicLong numPutsWithoutWAL = new AtomicLong(0);
- final AtomicLong dataInMemoryWithoutWAL = new AtomicLong(0);
+ final Counter numPutsWithoutWAL = new Counter();
+ final Counter dataInMemoryWithoutWAL = new Counter();
+ // Debug why CAS operations are taking a while.
final Counter checkAndMutateChecksPassed = new Counter();
final Counter checkAndMutateChecksFailed = new Counter();
+
+ //Number of requests
final Counter readRequestsCount = new Counter();
final Counter writeRequestsCount = new Counter();
+
+ //How long operations were blocked by a memstore over highwater.
final Counter updatesBlockedMs = new Counter();
/**
@@ -362,7 +362,8 @@ public class HRegion implements HeapSize { // , Writable{
public final static String REGIONINFO_FILE = ".regioninfo";
private HTableDescriptor htableDescriptor = null;
private RegionSplitPolicy splitPolicy;
- private final OperationMetrics opMetrics;
+
+ private final MetricsRegion metricsRegion;
/**
* Should only be used for testing purposes
@@ -386,7 +387,7 @@ public class HRegion implements HeapSize { // , Writable{
this.coprocessorHost = null;
this.scannerReadPoints = new ConcurrentHashMap();
- this.opMetrics = new OperationMetrics();
+ this.metricsRegion = new MetricsRegion(new MetricsRegionWrapperImpl(this));
}
/**
@@ -449,7 +450,7 @@ public class HRegion implements HeapSize { // , Writable{
this.regiondir = getRegionDir(this.tableDir, encodedNameStr);
this.scannerReadPoints = new ConcurrentHashMap();
- this.opMetrics = new OperationMetrics(conf, this.regionInfo);
+ this.metricsRegion = new MetricsRegion(new MetricsRegionWrapperImpl(this));
/*
* timestamp.slop provides a server-side constraint on the timestamp. This
@@ -839,21 +840,20 @@ public class HRegion implements HeapSize { // , Writable{
return this.rsServices;
}
- /** @return requestsCount for this region */
- public long getRequestsCount() {
- return this.readRequestsCount.get() + this.writeRequestsCount.get();
- }
-
/** @return readRequestsCount for this region */
- public long getReadRequestsCount() {
+ long getReadRequestsCount() {
return this.readRequestsCount.get();
}
/** @return writeRequestsCount for this region */
- public long getWriteRequestsCount() {
+ long getWriteRequestsCount() {
return this.writeRequestsCount.get();
}
+ MetricsRegion getMetrics() {
+ return metricsRegion;
+ }
+
/** @return true if region is closed */
public boolean isClosed() {
return this.closed.get();
@@ -1023,7 +1023,7 @@ public class HRegion implements HeapSize { // , Writable{
status.setStatus("Running coprocessor post-close hooks");
this.coprocessorHost.postClose(abort);
}
- this.opMetrics.closeMetrics();
+ this.metricsRegion.close();
status.markComplete("Closed");
LOG.info("Closed " + this);
return result;
@@ -1723,7 +1723,6 @@ public class HRegion implements HeapSize { // , Writable{
protected RegionScanner getScanner(Scan scan,
List additionalScanners) throws IOException {
startRegionOperation();
- this.readRequestsCount.increment();
try {
// Verify families are all valid
prepareScanner(scan);
@@ -2322,26 +2321,20 @@ public class HRegion implements HeapSize { // , Writable{
}
}
- // do after lock
- final long netTimeMs = EnvironmentEdgeManager.currentTimeMillis() - startTimeMs;
-
// See if the column families were consistent through the whole thing.
// if they were then keep them. If they were not then pass a null.
// null will be treated as unknown.
// Total time taken might be involving Puts and Deletes.
// Split the time for puts and deletes based on the total number of Puts and Deletes.
- long timeTakenForPuts = 0;
+
if (noOfPuts > 0) {
// There were some Puts in the batch.
double noOfMutations = noOfPuts + noOfDeletes;
- timeTakenForPuts = (long) (netTimeMs * (noOfPuts / noOfMutations));
- final Set keptCfs = putsCfSetConsistent ? putsCfSet : null;
- this.opMetrics.updateMultiPutMetrics(keptCfs, timeTakenForPuts);
+ this.metricsRegion.updatePut();
}
if (noOfDeletes > 0) {
// There were some Deletes in the batch.
- final Set keptCfs = deletesCfSetConsistent ? deletesCfSet : null;
- this.opMetrics.updateMultiDeleteMetrics(keptCfs, netTimeMs - timeTakenForPuts);
+ this.metricsRegion.updateDelete();
}
if (!success) {
for (int i = firstIndex; i < lastIndexExclusive; i++) {
@@ -3179,7 +3172,7 @@ public class HRegion implements HeapSize { // , Writable{
/**
* See if row is currently locked.
- * @param lockid
+ * @param lockId
* @return boolean
*/
boolean isRowLocked(final Integer lockId) {
@@ -4248,7 +4241,6 @@ public class HRegion implements HeapSize { // , Writable{
*/
private List get(Get get, boolean withCoprocessor)
throws IOException {
- long now = EnvironmentEdgeManager.currentTimeMillis();
List results = new ArrayList();
@@ -4264,7 +4256,7 @@ public class HRegion implements HeapSize { // , Writable{
RegionScanner scanner = null;
try {
scanner = getScanner(scan);
- scanner.next(results, SchemaMetrics.METRIC_GETSIZE);
+ scanner.next(results);
} finally {
if (scanner != null)
scanner.close();
@@ -4276,8 +4268,8 @@ public class HRegion implements HeapSize { // , Writable{
}
// do after lock
- final long after = EnvironmentEdgeManager.currentTimeMillis();
- this.opMetrics.updateGetMetrics(get.familySet(), after - now);
+
+ this.metricsRegion.updateGet();
return results;
}
@@ -4324,9 +4316,6 @@ public class HRegion implements HeapSize { // , Writable{
public void processRowsWithLocks(RowProcessor> processor, long timeout)
throws IOException {
- final long startNanoTime = System.nanoTime();
- String metricsName = "rowprocessor." + processor.getName();
-
for (byte[] row : processor.getRowsToLock()) {
checkRow(row, "processRowsWithLocks");
}
@@ -4349,20 +4338,13 @@ public class HRegion implements HeapSize { // , Writable{
processor, now, this, null, null, timeout);
processor.postProcess(this, walEdit);
} catch (IOException e) {
- long endNanoTime = System.nanoTime();
- RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".error.nano",
- endNanoTime - startNanoTime);
throw e;
} finally {
closeRegionOperation();
}
- final long endNanoTime = System.nanoTime();
- RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".nano",
- endNanoTime - startNanoTime);
return;
}
- long lockedNanoTime, processDoneNanoTime, unlockedNanoTime = 0;
MultiVersionConsistencyControl.WriteEntry writeEntry = null;
boolean locked = false;
boolean walSyncSuccessful = false;
@@ -4385,7 +4367,6 @@ public class HRegion implements HeapSize { // , Writable{
// 3. Region lock
this.updatesLock.readLock().lock();
locked = true;
- lockedNanoTime = System.nanoTime();
long now = EnvironmentEdgeManager.currentTimeMillis();
try {
@@ -4393,7 +4374,6 @@ public class HRegion implements HeapSize { // , Writable{
// waledits
doProcessRowWithTimeout(
processor, now, this, mutations, walEdit, timeout);
- processDoneNanoTime = System.nanoTime();
if (!mutations.isEmpty()) {
// 5. Get a mvcc write number
@@ -4418,7 +4398,6 @@ public class HRegion implements HeapSize { // , Writable{
this.updatesLock.readLock().unlock();
locked = false;
}
- unlockedNanoTime = System.nanoTime();
// 9. Release row lock(s)
if (acquiredLocks != null) {
@@ -4456,17 +4435,13 @@ public class HRegion implements HeapSize { // , Writable{
releaseRowLock(lid);
}
}
- unlockedNanoTime = unlockedNanoTime == 0 ?
- System.nanoTime() : unlockedNanoTime;
+
}
// 12. Run post-process hook
processor.postProcess(this, walEdit);
} catch (IOException e) {
- long endNanoTime = System.nanoTime();
- RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".error.nano",
- endNanoTime - startNanoTime);
throw e;
} finally {
closeRegionOperation();
@@ -4475,22 +4450,6 @@ public class HRegion implements HeapSize { // , Writable{
requestFlush();
}
}
- // Populate all metrics
- long endNanoTime = System.nanoTime();
- RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".nano",
- endNanoTime - startNanoTime);
-
- RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".acquirelock.nano",
- lockedNanoTime - startNanoTime);
-
- RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".process.nano",
- processDoneNanoTime - lockedNanoTime);
-
- RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".occupylock.nano",
- unlockedNanoTime - lockedNanoTime);
-
- RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".sync.nano",
- endNanoTime - unlockedNanoTime);
}
private void doProcessRowWithTimeout(final RowProcessor> processor,
@@ -4567,7 +4526,7 @@ public class HRegion implements HeapSize { // , Writable{
WALEdit walEdits = null;
List allKVs = new ArrayList(append.size());
Map> tempMemstore = new HashMap>();
- long before = EnvironmentEdgeManager.currentTimeMillis();
+
long size = 0;
long txid = 0;
@@ -4684,8 +4643,7 @@ public class HRegion implements HeapSize { // , Writable{
closeRegionOperation();
}
- long after = EnvironmentEdgeManager.currentTimeMillis();
- this.opMetrics.updateAppendMetrics(append.getFamilyMap().keySet(), after - before);
+ this.metricsRegion.updateAppend();
if (flush) {
@@ -4720,7 +4678,7 @@ public class HRegion implements HeapSize { // , Writable{
WALEdit walEdits = null;
List allKVs = new ArrayList(increment.numColumns());
Map> tempMemstore = new HashMap>();
- long before = EnvironmentEdgeManager.currentTimeMillis();
+
long size = 0;
long txid = 0;
@@ -4810,8 +4768,7 @@ public class HRegion implements HeapSize { // , Writable{
}
} finally {
closeRegionOperation();
- long after = EnvironmentEdgeManager.currentTimeMillis();
- this.opMetrics.updateIncrementMetrics(increment.getFamilyMap().keySet(), after - before);
+ this.metricsRegion.updateIncrement();
}
if (flush) {
@@ -5284,7 +5241,8 @@ public class HRegion implements HeapSize { // , Writable{
* These information are exposed by the region server metrics.
*/
private void recordPutWithoutWal(final Map> familyMap) {
- if (numPutsWithoutWAL.getAndIncrement() == 0) {
+ numPutsWithoutWAL.increment();
+ if (numPutsWithoutWAL.get() <= 1) {
LOG.info("writing data to region " + this +
" with WAL disabled. Data may be lost in the event of a crash.");
}
@@ -5296,7 +5254,7 @@ public class HRegion implements HeapSize { // , Writable{
}
}
- dataInMemoryWithoutWAL.addAndGet(putSize);
+ dataInMemoryWithoutWAL.add(putSize);
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 06cbcce8ec8..e79ef487e2b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -48,13 +48,11 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.management.ObjectName;
import com.google.protobuf.Message;
-import org.apache.commons.lang.mutable.MutableDouble;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -67,7 +65,6 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.FailedSanityCheckException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
@@ -107,9 +104,7 @@ import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
import org.apache.hadoop.hbase.fs.HFileSystem;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.CacheStats;
import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
import org.apache.hadoop.hbase.ipc.HBaseRPC;
import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
@@ -191,11 +186,6 @@ import org.apache.hadoop.hbase.regionserver.handler.CloseRootHandler;
import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
import org.apache.hadoop.hbase.regionserver.handler.OpenRootHandler;
-import org.apache.hadoop.hbase.regionserver.metrics.RegionMetricsStorage;
-import org.apache.hadoop.hbase.regionserver.metrics.RegionServerDynamicMetrics;
-import org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.StoreMetricType;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
@@ -225,6 +215,7 @@ import org.apache.hadoop.net.DNS;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.zookeeper.KeeperException;
+import org.cliffc.high_scale_lib.Counter;
import org.codehaus.jackson.map.ObjectMapper;
import com.google.common.base.Function;
@@ -297,9 +288,8 @@ public class HRegionServer implements ClientProtocol,
// Instance of the hbase executor service.
protected ExecutorService service;
- // Request counter.
- // Do we need this? Can't we just sum region counters? St.Ack 20110412
- protected AtomicInteger requestCount = new AtomicInteger();
+ // Request counter. (Includes requests that are not serviced by regions.)
+ final Counter requestCount = new Counter();
// If false, the file system has become unavailable
protected volatile boolean fsOk;
@@ -366,9 +356,7 @@ public class HRegionServer implements ClientProtocol,
*/
private final LinkedList reservedSpace = new LinkedList();
- private RegionServerMetrics metrics;
-
- private RegionServerDynamicMetrics dynamicMetrics;
+ private MetricsRegionServer metricsRegionServer;
/*
* Check for compactions requests.
@@ -403,7 +391,7 @@ public class HRegionServer implements ClientProtocol,
private final RegionServerAccounting regionServerAccounting;
// Cache configuration and block cache reference
- private final CacheConfig cacheConfig;
+ final CacheConfig cacheConfig;
// reference to the Thrift Server.
volatile private HRegionThriftServer thriftServer;
@@ -446,7 +434,6 @@ public class HRegionServer implements ClientProtocol,
*/
private final QosFunction qosFunction;
-
/**
* Starts a HRegionServer at the default location
*
@@ -550,6 +537,10 @@ public class HRegionServer implements ClientProtocol,
}
}
+ String getClusterId() {
+ return this.conf.get(HConstants.CLUSTER_ID);
+ }
+
@Retention(RetentionPolicy.RUNTIME)
protected @interface QosPriority {
int priority() default 0;
@@ -858,7 +849,6 @@ public class HRegionServer implements ClientProtocol,
break;
}
}
- registerMBean();
// We registered with the Master. Go into run mode.
long lastMsg = 0;
@@ -893,7 +883,6 @@ public class HRegionServer implements ClientProtocol,
}
long now = System.currentTimeMillis();
if ((now - lastMsg) >= msgInterval) {
- doMetrics();
tryRegionServerReport(lastMsg, now);
lastMsg = System.currentTimeMillis();
}
@@ -1022,8 +1011,6 @@ public class HRegionServer implements ClientProtocol,
void tryRegionServerReport(long reportStartTime, long reportEndTime)
throws IOException {
HBaseProtos.ServerLoad sl = buildServerLoad(reportStartTime, reportEndTime);
- // Why we do this?
- this.requestCount.set(0);
try {
RegionServerReportRequest.Builder request = RegionServerReportRequest.newBuilder();
ServerName sn = ServerName.parseVersionedServerName(
@@ -1044,13 +1031,21 @@ public class HRegionServer implements ClientProtocol,
}
HBaseProtos.ServerLoad buildServerLoad(long reportStartTime, long reportEndTime) {
+ // We're getting the MetricsRegionServerWrapper here because the wrapper computes requests
+ // per second, and other metrics As long as metrics are part of ServerLoad it's best to use
+ // the wrapper to compute those numbers in one place.
+ // In the long term most of these should be moved off of ServerLoad and the heart beat.
+ // Instead they should be stored in an HBase table so that external visibility into HBase is
+ // improved; Additionally the load balancer will be able to take advantage of a more complete
+ // history.
+ MetricsRegionServerWrapper regionServerWrapper = this.metricsRegionServer.getRegionServerWrapper();
Collection regions = getOnlineRegionsLocalContext();
MemoryUsage memory =
ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
HBaseProtos.ServerLoad.Builder serverLoad = HBaseProtos.ServerLoad.newBuilder();
- serverLoad.setNumberOfRequests((int)metrics.getRequests());
- serverLoad.setTotalNumberOfRequests(requestCount.get());
+ serverLoad.setNumberOfRequests((int) regionServerWrapper.getRequestsPerSecond());
+ serverLoad.setTotalNumberOfRequests((int) regionServerWrapper.getTotalRequestCount());
serverLoad.setUsedHeapMB((int)(memory.getUsed() / 1024 / 1024));
serverLoad.setMaxHeapMB((int) (memory.getMax() / 1024 / 1024));
Set coprocessors = this.hlog.getCoprocessorHost().getCoprocessors();
@@ -1205,8 +1200,7 @@ public class HRegionServer implements ClientProtocol,
this.tableDescriptors = new FSTableDescriptors(this.fs, this.rootDir, true);
this.hlog = setupWALAndReplication();
// Init in here rather than in constructor after thread name has been set
- this.metrics = new RegionServerMetrics();
- this.dynamicMetrics = RegionServerDynamicMetrics.newInstance();
+ this.metricsRegionServer = new MetricsRegionServer(new MetricsRegionServerWrapperImpl(this));
startServiceThreads();
LOG.info("Serving as " + this.serverNameFromMasterPOV +
", RPC listening on " + this.isa +
@@ -1441,179 +1435,8 @@ public class HRegionServer implements ClientProtocol,
return hlogRoller;
}
- /*
- * @param interval Interval since last time metrics were called.
- */
- protected void doMetrics() {
- try {
- metrics();
- } catch (Throwable e) {
- LOG.warn("Failed metrics", e);
- }
- }
-
- protected void metrics() {
- this.metrics.regions.set(this.onlineRegions.size());
- this.metrics.incrementRequests(this.requestCount.get());
- this.metrics.requests.intervalHeartBeat();
- // Is this too expensive every three seconds getting a lock on onlineRegions
- // and then per store carried? Can I make metrics be sloppier and avoid
- // the synchronizations?
- int stores = 0;
- int storefiles = 0;
- long memstoreSize = 0;
- int readRequestsCount = 0;
- int writeRequestsCount = 0;
- long checkAndMutateChecksFailed = 0;
- long checkAndMutateChecksPassed = 0;
- long storefileIndexSize = 0;
- HDFSBlocksDistribution hdfsBlocksDistribution =
- new HDFSBlocksDistribution();
- long totalStaticIndexSize = 0;
- long totalStaticBloomSize = 0;
- long numPutsWithoutWAL = 0;
- long dataInMemoryWithoutWAL = 0;
- long updatesBlockedMs = 0;
-
- // Note that this is a map of Doubles instead of Longs. This is because we
- // do effective integer division, which would perhaps truncate more than it
- // should because we do it only on one part of our sum at a time. Rather
- // than dividing at the end, where it is difficult to know the proper
- // factor, everything is exact then truncated.
- final Map tempVals =
- new HashMap();
-
- for (Map.Entry e : this.onlineRegions.entrySet()) {
- HRegion r = e.getValue();
- memstoreSize += r.memstoreSize.get();
- numPutsWithoutWAL += r.numPutsWithoutWAL.get();
- dataInMemoryWithoutWAL += r.dataInMemoryWithoutWAL.get();
- readRequestsCount += r.readRequestsCount.get();
- writeRequestsCount += r.writeRequestsCount.get();
- checkAndMutateChecksFailed += r.checkAndMutateChecksFailed.get();
- checkAndMutateChecksPassed += r.checkAndMutateChecksPassed.get();
- updatesBlockedMs += r.updatesBlockedMs.get();
- synchronized (r.stores) {
- stores += r.stores.size();
- for (Map.Entry ee : r.stores.entrySet()) {
- final Store store = ee.getValue();
- final SchemaMetrics schemaMetrics = store.getSchemaMetrics();
-
- {
- long tmpStorefiles = store.getStorefilesCount();
- schemaMetrics.accumulateStoreMetric(tempVals,
- StoreMetricType.STORE_FILE_COUNT, tmpStorefiles);
- storefiles += tmpStorefiles;
- }
-
-
- {
- long tmpStorefileIndexSize = store.getStorefilesIndexSize();
- schemaMetrics.accumulateStoreMetric(tempVals,
- StoreMetricType.STORE_FILE_INDEX_SIZE,
- (long) (tmpStorefileIndexSize / (1024.0 * 1024)));
- storefileIndexSize += tmpStorefileIndexSize;
- }
-
- {
- long tmpStorefilesSize = store.getStorefilesSize();
- schemaMetrics.accumulateStoreMetric(tempVals,
- StoreMetricType.STORE_FILE_SIZE_MB,
- (long) (tmpStorefilesSize / (1024.0 * 1024)));
- }
-
- {
- long tmpStaticBloomSize = store.getTotalStaticBloomSize();
- schemaMetrics.accumulateStoreMetric(tempVals,
- StoreMetricType.STATIC_BLOOM_SIZE_KB,
- (long) (tmpStaticBloomSize / 1024.0));
- totalStaticBloomSize += tmpStaticBloomSize;
- }
-
- {
- long tmpStaticIndexSize = store.getTotalStaticIndexSize();
- schemaMetrics.accumulateStoreMetric(tempVals,
- StoreMetricType.STATIC_INDEX_SIZE_KB,
- (long) (tmpStaticIndexSize / 1024.0));
- totalStaticIndexSize += tmpStaticIndexSize;
- }
-
- schemaMetrics.accumulateStoreMetric(tempVals,
- StoreMetricType.MEMSTORE_SIZE_MB,
- (long) (store.getMemStoreSize() / (1024.0 * 1024)));
- }
- }
-
- hdfsBlocksDistribution.add(r.getHDFSBlocksDistribution());
- }
-
- for (Entry e : tempVals.entrySet()) {
- RegionMetricsStorage.setNumericMetric(e.getKey(), e.getValue().longValue());
- }
-
- this.metrics.stores.set(stores);
- this.metrics.storefiles.set(storefiles);
- this.metrics.memstoreSizeMB.set((int) (memstoreSize / (1024 * 1024)));
- this.metrics.mbInMemoryWithoutWAL.set((int) (dataInMemoryWithoutWAL / (1024 * 1024)));
- this.metrics.numPutsWithoutWAL.set(numPutsWithoutWAL);
- this.metrics.storefileIndexSizeMB.set(
- (int) (storefileIndexSize / (1024 * 1024)));
- this.metrics.rootIndexSizeKB.set(
- (int) (storefileIndexSize / 1024));
- this.metrics.totalStaticIndexSizeKB.set(
- (int) (totalStaticIndexSize / 1024));
- this.metrics.totalStaticBloomSizeKB.set(
- (int) (totalStaticBloomSize / 1024));
- this.metrics.readRequestsCount.set(readRequestsCount);
- this.metrics.writeRequestsCount.set(writeRequestsCount);
- this.metrics.checkAndMutateChecksFailed.set(checkAndMutateChecksFailed);
- this.metrics.checkAndMutateChecksPassed.set(checkAndMutateChecksPassed);
- this.metrics.compactionQueueSize.set(compactSplitThread
- .getCompactionQueueSize());
- this.metrics.flushQueueSize.set(cacheFlusher
- .getFlushQueueSize());
- this.metrics.updatesBlockedSeconds.update(updatesBlockedMs > 0 ?
- updatesBlockedMs/1000: 0);
- final long updatesBlockedMsHigherWater = cacheFlusher.getUpdatesBlockedMsHighWater().get();
- this.metrics.updatesBlockedSecondsHighWater.update(updatesBlockedMsHigherWater > 0 ?
- updatesBlockedMsHigherWater/1000: 0);
-
- BlockCache blockCache = cacheConfig.getBlockCache();
- if (blockCache != null) {
- this.metrics.blockCacheCount.set(blockCache.size());
- this.metrics.blockCacheFree.set(blockCache.getFreeSize());
- this.metrics.blockCacheSize.set(blockCache.getCurrentSize());
- CacheStats cacheStats = blockCache.getStats();
- this.metrics.blockCacheHitCount.set(cacheStats.getHitCount());
- this.metrics.blockCacheMissCount.set(cacheStats.getMissCount());
- this.metrics.blockCacheEvictedCount.set(blockCache.getEvictedCount());
- double ratio = blockCache.getStats().getHitRatio();
- int percent = (int) (ratio * 100);
- this.metrics.blockCacheHitRatio.set(percent);
- ratio = blockCache.getStats().getHitCachingRatio();
- percent = (int) (ratio * 100);
- this.metrics.blockCacheHitCachingRatio.set(percent);
- // past N period block cache hit / hit caching ratios
- cacheStats.rollMetricsPeriod();
- ratio = cacheStats.getHitRatioPastNPeriods();
- percent = (int) (ratio * 100);
- this.metrics.blockCacheHitRatioPastNPeriods.set(percent);
- ratio = cacheStats.getHitCachingRatioPastNPeriods();
- percent = (int) (ratio * 100);
- this.metrics.blockCacheHitCachingRatioPastNPeriods.set(percent);
- }
- float localityIndex = hdfsBlocksDistribution.getBlockLocalityIndex(
- getServerName().getHostname());
- int percent = (int) (localityIndex * 100);
- this.metrics.hdfsBlocksLocalityIndex.set(percent);
-
- }
-
- /**
- * @return Region server metrics instance.
- */
- public RegionServerMetrics getMetrics() {
- return this.metrics;
+ public MetricsRegionServer getMetrics() {
+ return this.metricsRegionServer;
}
/**
@@ -1841,9 +1664,6 @@ public class HRegionServer implements ClientProtocol,
// java.util.HashSet's toString() method to print the coprocessor names.
LOG.fatal("RegionServer abort: loaded coprocessors are: " +
CoprocessorHost.getLoadedCoprocessors());
- if (this.metrics != null) {
- LOG.info("Dump of metrics: " + this.metrics);
- }
// Do our best to report our abort to the master, but this may not work
try {
if (cause != null) {
@@ -2146,45 +1966,7 @@ public class HRegionServer implements ClientProtocol,
}
/**
- * @param encodedRegionName
- * @return JSON Map of labels to values for passed in encodedRegionName
- * @throws IOException
- */
- public byte [] getRegionStats(final String encodedRegionName)
- throws IOException {
- HRegion r = null;
- synchronized (this.onlineRegions) {
- r = this.onlineRegions.get(encodedRegionName);
- }
- if (r == null) return null;
- ObjectMapper mapper = new ObjectMapper();
- int stores = 0;
- int storefiles = 0;
- int storefileSizeMB = 0;
- int memstoreSizeMB = (int) (r.memstoreSize.get() / 1024 / 1024);
- int storefileIndexSizeMB = 0;
- synchronized (r.stores) {
- stores += r.stores.size();
- for (Store store : r.stores.values()) {
- storefiles += store.getStorefilesCount();
- storefileSizeMB += (int) (store.getStorefilesSize() / 1024 / 1024);
- storefileIndexSizeMB += (int) (store.getStorefilesIndexSize() / 1024 / 1024);
- }
- }
- Map map = new TreeMap();
- map.put("stores", stores);
- map.put("storefiles", storefiles);
- map.put("storefileSizeMB", storefileSizeMB);
- map.put("storefileIndexSizeMB", storefileIndexSizeMB);
- map.put("memstoreSizeMB", memstoreSizeMB);
- StringWriter w = new StringWriter();
- mapper.writeValue(w, map);
- w.close();
- return Bytes.toBytes(w.toString());
- }
-
- /**
- * For tests and web ui.
+ * For tests, web ui and metrics.
* This method will only work if HRegionServer is in the same JVM as client;
* HRegion cannot be serialized to cross an rpc.
* @see #getOnlineRegions()
@@ -2218,11 +2000,6 @@ public class HRegionServer implements ClientProtocol,
return sortedRegions;
}
- /** @return the request count */
- public AtomicInteger getRequestCount() {
- return this.requestCount;
- }
-
/**
* @return time stamp in millis of when this region server was started
*/
@@ -2497,16 +2274,6 @@ public class HRegionServer implements ClientProtocol,
new ServerLoad(sl).getRegionServerCoprocessors();
}
- /**
- * Register bean with platform management server
- */
- void registerMBean() {
- MXBeanImpl mxBeanInfo = MXBeanImpl.init(this);
- mxBean = MBeanUtil.registerMBean("RegionServer", "RegionServer",
- mxBeanInfo);
- LOG.info("Registered RegionServer MXBean");
- }
-
/**
* Instantiated as a row lock lease. If the lease times out, the row lock is
* released
@@ -2685,14 +2452,7 @@ public class HRegionServer implements ClientProtocol,
if (destination != null){
addToMovedRegions(encodedRegionName, destination);
}
-
- //Clear all of the dynamic metrics as they are now probably useless.
- //This is a clear because dynamic metrics could include metrics per cf and
- //per hfile. Figuring out which cfs, hfiles, and regions are still relevant to
- //this region server would be an onerous task. Instead just clear everything
- //and on the next tick of the metrics everything that is still relevant will be
- //re-added.
- this.dynamicMetrics.clear();
+
return toReturn != null;
}
@@ -2885,8 +2645,9 @@ public class HRegionServer implements ClientProtocol,
@Override
public GetResponse get(final RpcController controller,
final GetRequest request) throws ServiceException {
+ long before = EnvironmentEdgeManager.currentTimeMillis();
try {
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
GetResponse.Builder builder = GetResponse.newBuilder();
ClientProtos.Get get = request.getGet();
@@ -2926,6 +2687,8 @@ public class HRegionServer implements ClientProtocol,
return builder.build();
} catch (IOException ie) {
throw new ServiceException(ie);
+ } finally {
+ metricsRegionServer.updateGet(EnvironmentEdgeManager.currentTimeMillis() - before);
}
}
@@ -2940,7 +2703,7 @@ public class HRegionServer implements ClientProtocol,
public MutateResponse mutate(final RpcController controller,
final MutateRequest request) throws ServiceException {
try {
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
MutateResponse.Builder builder = MutateResponse.newBuilder();
Mutate mutate = request.getMutate();
@@ -3073,7 +2836,7 @@ public class HRegionServer implements ClientProtocol,
}
throw e;
}
- requestCount.incrementAndGet();
+ requestCount.increment();
try {
int ttl = 0;
@@ -3167,7 +2930,7 @@ public class HRegionServer implements ClientProtocol,
for (int i = 0; i < rows
&& currentScanResultSize < maxResultSize; i++) {
// Collect values to be returned here
- boolean moreRows = scanner.next(values, SchemaMetrics.METRIC_NEXTSIZE);
+ boolean moreRows = scanner.next(values);
if (!values.isEmpty()) {
for (KeyValue kv : values) {
currentScanResultSize += kv.heapSize();
@@ -3261,7 +3024,7 @@ public class HRegionServer implements ClientProtocol,
throw new DoNotRetryIOException(
"lockRow supports only one row now, not " + request.getRowCount() + " rows");
}
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
byte[] row = request.getRow(0).toByteArray();
try {
@@ -3292,7 +3055,7 @@ public class HRegionServer implements ClientProtocol,
public UnlockRowResponse unlockRow(final RpcController controller,
final UnlockRowRequest request) throws ServiceException {
try {
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
if (!request.hasLockId()) {
throw new DoNotRetryIOException(
@@ -3327,7 +3090,7 @@ public class HRegionServer implements ClientProtocol,
public BulkLoadHFileResponse bulkLoadHFile(final RpcController controller,
final BulkLoadHFileRequest request) throws ServiceException {
try {
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
List> familyPaths = new ArrayList>();
for (FamilyPath familyPath: request.getFamilyPathList()) {
@@ -3374,7 +3137,7 @@ public class HRegionServer implements ClientProtocol,
public ExecCoprocessorResponse execCoprocessor(final RpcController controller,
final ExecCoprocessorRequest request) throws ServiceException {
try {
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
ExecCoprocessorResponse.Builder
builder = ExecCoprocessorResponse.newBuilder();
@@ -3392,7 +3155,7 @@ public class HRegionServer implements ClientProtocol,
public CoprocessorServiceResponse execService(final RpcController controller,
final CoprocessorServiceRequest request) throws ServiceException {
try {
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
// ignore the passed in controller (from the serialized call)
ServerRpcController execController = new ServerRpcController();
@@ -3441,7 +3204,7 @@ public class HRegionServer implements ClientProtocol,
ActionResult.Builder resultBuilder = null;
List mutates = new ArrayList();
for (ClientProtos.MultiAction actionUnion : request.getActionList()) {
- requestCount.incrementAndGet();
+ requestCount.increment();
try {
Object result = null;
if (actionUnion.hasGet()) {
@@ -3524,7 +3287,7 @@ public class HRegionServer implements ClientProtocol,
final GetRegionInfoRequest request) throws ServiceException {
try {
checkOpen();
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
HRegionInfo info = region.getRegionInfo();
GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
@@ -3544,7 +3307,7 @@ public class HRegionServer implements ClientProtocol,
final GetStoreFileRequest request) throws ServiceException {
try {
HRegion region = getRegion(request.getRegion());
- requestCount.incrementAndGet();
+ requestCount.increment();
Set columnFamilies = null;
if (request.getFamilyCount() == 0) {
columnFamilies = region.getStores().keySet();
@@ -3571,7 +3334,7 @@ public class HRegionServer implements ClientProtocol,
final GetOnlineRegionRequest request) throws ServiceException {
try {
checkOpen();
- requestCount.incrementAndGet();
+ requestCount.increment();
List list = new ArrayList(onlineRegions.size());
for (HRegion region: this.onlineRegions.values()) {
list.add(region.getRegionInfo());
@@ -3602,7 +3365,7 @@ public class HRegionServer implements ClientProtocol,
} catch (IOException ie) {
throw new ServiceException(ie);
}
- requestCount.incrementAndGet();
+ requestCount.increment();
OpenRegionResponse.Builder builder = OpenRegionResponse.newBuilder();
int regionCount = request.getOpenInfoCount();
Map htds =
@@ -3694,7 +3457,6 @@ public class HRegionServer implements ClientProtocol,
try {
checkOpen();
- requestCount.incrementAndGet();
String encodedRegionName =
ProtobufUtil.getRegionEncodedName(request.getRegion());
byte[] encodedName = Bytes.toBytes(encodedRegionName);
@@ -3706,6 +3468,7 @@ public class HRegionServer implements ClientProtocol,
checkIfRegionInTransition(encodedName, CLOSE);
}
HRegion region = getRegionByEncodedName(encodedRegionName);
+ requestCount.increment();
LOG.info("Received close region: " + region.getRegionNameAsString() +
". Version of ZK closing node:" + versionOfClosingNode +
". Destination server:" + sn);
@@ -3734,7 +3497,7 @@ public class HRegionServer implements ClientProtocol,
final FlushRegionRequest request) throws ServiceException {
try {
checkOpen();
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
LOG.info("Flushing " + region.getRegionNameAsString());
boolean shouldFlush = true;
@@ -3765,7 +3528,7 @@ public class HRegionServer implements ClientProtocol,
final SplitRegionRequest request) throws ServiceException {
try {
checkOpen();
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
LOG.info("Splitting " + region.getRegionNameAsString());
region.flushcache();
@@ -3794,7 +3557,7 @@ public class HRegionServer implements ClientProtocol,
final CompactRegionRequest request) throws ServiceException {
try {
checkOpen();
- requestCount.incrementAndGet();
+ requestCount.increment();
HRegion region = getRegion(request.getRegion());
LOG.info("Compacting " + region.getRegionNameAsString());
boolean major = false;
@@ -3829,7 +3592,7 @@ public class HRegionServer implements ClientProtocol,
try {
if (replicationSinkHandler != null) {
checkOpen();
- requestCount.incrementAndGet();
+ requestCount.increment();
HLog.Entry[] entries = ProtobufUtil.toHLogEntries(request.getEntryList());
if (entries != null && entries.length > 0) {
replicationSinkHandler.replicateLogEntries(entries);
@@ -3852,7 +3615,7 @@ public class HRegionServer implements ClientProtocol,
public RollWALWriterResponse rollWALWriter(final RpcController controller,
final RollWALWriterRequest request) throws ServiceException {
try {
- requestCount.incrementAndGet();
+ requestCount.increment();
HLog wal = this.getWAL();
byte[][] regionsToFlush = wal.rollWriter(true);
RollWALWriterResponse.Builder builder = RollWALWriterResponse.newBuilder();
@@ -3877,7 +3640,7 @@ public class HRegionServer implements ClientProtocol,
@Override
public StopServerResponse stopServer(final RpcController controller,
final StopServerRequest request) throws ServiceException {
- requestCount.incrementAndGet();
+ requestCount.increment();
String reason = request.getReason();
stop(reason);
return StopServerResponse.newBuilder().build();
@@ -3894,7 +3657,7 @@ public class HRegionServer implements ClientProtocol,
public GetServerInfoResponse getServerInfo(final RpcController controller,
final GetServerInfoRequest request) throws ServiceException {
ServerName serverName = getServerName();
- requestCount.incrementAndGet();
+ requestCount.increment();
return ResponseConverter.buildGetServerInfoResponse(serverName, webuiport);
}
@@ -3924,6 +3687,7 @@ public class HRegionServer implements ClientProtocol,
*/
protected Result append(final HRegion region,
final Mutate mutate) throws IOException {
+ long before = EnvironmentEdgeManager.currentTimeMillis();
Append append = ProtobufUtil.toAppend(mutate);
Result r = null;
if (region.getCoprocessorHost() != null) {
@@ -3936,6 +3700,7 @@ public class HRegionServer implements ClientProtocol,
region.getCoprocessorHost().postAppend(append, r);
}
}
+ metricsRegionServer.updateAppend(EnvironmentEdgeManager.currentTimeMillis() - before);
return r;
}
@@ -3949,6 +3714,7 @@ public class HRegionServer implements ClientProtocol,
*/
protected Result increment(final HRegion region,
final Mutate mutate) throws IOException {
+ long before = EnvironmentEdgeManager.currentTimeMillis();
Increment increment = ProtobufUtil.toIncrement(mutate);
Result r = null;
if (region.getCoprocessorHost() != null) {
@@ -3961,6 +3727,7 @@ public class HRegionServer implements ClientProtocol,
r = region.getCoprocessorHost().postIncrement(increment, r);
}
}
+ metricsRegionServer.updateIncrement(EnvironmentEdgeManager.currentTimeMillis() - before);
return r;
}
@@ -3975,7 +3742,8 @@ public class HRegionServer implements ClientProtocol,
final HRegion region, final List mutates) {
@SuppressWarnings("unchecked")
Pair[] mutationsWithLocks = new Pair[mutates.size()];
-
+ long before = EnvironmentEdgeManager.currentTimeMillis();
+ boolean batchContainsPuts = false, batchContainsDelete = false;
try {
ActionResult.Builder resultBuilder = ActionResult.newBuilder();
NameBytesPair value = ProtobufUtil.toParameter(new Result());
@@ -3987,15 +3755,18 @@ public class HRegionServer implements ClientProtocol,
Mutation mutation = null;
if (m.getMutateType() == MutateType.PUT) {
mutation = ProtobufUtil.toPut(m);
+ batchContainsPuts = true;
} else {
mutation = ProtobufUtil.toDelete(m);
+ batchContainsDelete = true;
}
Integer lock = getLockFromId(mutation.getLockId());
mutationsWithLocks[i++] = new Pair(mutation, lock);
builder.addResult(result);
}
- requestCount.addAndGet(mutates.size());
+
+ requestCount.add(mutates.size());
if (!region.getRegionInfo().isMetaTable()) {
cacheFlusher.reclaimMemStoreMemory();
}
@@ -4031,6 +3802,13 @@ public class HRegionServer implements ClientProtocol,
builder.setResult(i, result);
}
}
+ long after = EnvironmentEdgeManager.currentTimeMillis();
+ if (batchContainsPuts) {
+ metricsRegionServer.updatePut(after - before);
+ }
+ if (batchContainsDelete) {
+ metricsRegionServer.updateDelete(after - before);
+ }
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 88288adeeb4..8f7aaf70e02 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -66,8 +66,6 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.regionserver.compactions.CompactSelection;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.ClassSize;
@@ -106,7 +104,7 @@ import com.google.common.collect.Lists;
* not be called directly but by an HRegion manager.
*/
@InterfaceAudience.Private
-public class HStore extends SchemaConfigured implements Store {
+public class HStore implements Store {
static final Log LOG = LogFactory.getLog(HStore.class);
protected final MemStore memstore;
@@ -174,9 +172,7 @@ public class HStore extends SchemaConfigured implements Store {
protected HStore(Path basedir, HRegion region, HColumnDescriptor family,
FileSystem fs, Configuration confParam)
throws IOException {
- super(new CompoundConfiguration().add(confParam).add(
- family.getValues()), region.getRegionInfo().getTableNameAsString(),
- Bytes.toString(family.getName()));
+
HRegionInfo info = region.getRegionInfo();
this.fs = fs;
// Assemble the store's home directory.
@@ -260,6 +256,15 @@ public class HStore extends SchemaConfigured implements Store {
return ttl;
}
+ public String getColumnFamilyName() {
+ return this.family.getNameAsString();
+ }
+
+ @Override
+ public String getTableName() {
+ return this.region.getTableDesc().getNameAsString();
+ }
+
/**
* Create this store's homedir
* @param fs
@@ -414,7 +419,6 @@ public class HStore extends SchemaConfigured implements Store {
public StoreFile call() throws IOException {
StoreFile storeFile = new StoreFile(fs, p, conf, cacheConf,
family.getBloomFilterType(), dataBlockEncoder);
- passSchemaMetricsTo(storeFile);
storeFile.createReader();
return storeFile;
}
@@ -573,7 +577,6 @@ public class HStore extends SchemaConfigured implements Store {
StoreFile sf = new StoreFile(fs, dstPath, this.conf, this.cacheConf,
this.family.getBloomFilterType(), this.dataBlockEncoder);
- passSchemaMetricsTo(sf);
StoreFile.Reader r = sf.createReader();
this.storeSize += r.length();
@@ -817,19 +820,11 @@ public class HStore extends SchemaConfigured implements Store {
status.setStatus("Flushing " + this + ": reopening flushed file");
StoreFile sf = new StoreFile(this.fs, dstPath, this.conf, this.cacheConf,
this.family.getBloomFilterType(), this.dataBlockEncoder);
- passSchemaMetricsTo(sf);
StoreFile.Reader r = sf.createReader();
this.storeSize += r.length();
this.totalUncompressedBytes += r.getTotalUncompressedBytes();
- // This increments the metrics associated with total flushed bytes for this
- // family. The overall flush count is stored in the static metrics and
- // retrieved from HRegion.recentFlushes, which is set within
- // HRegion.internalFlushcache, which indirectly calls this to actually do
- // the flushing through the StoreFlusherImpl class
- getSchemaMetrics().updatePersistentStoreMetric(
- SchemaMetrics.StoreMetricType.FLUSH_SIZE, flushedSize.longValue());
if (LOG.isInfoEnabled()) {
LOG.info("Added " + sf + ", entries=" + r.getEntries() +
", sequenceid=" + logCacheFlushId +
@@ -875,11 +870,6 @@ public class HStore extends SchemaConfigured implements Store {
.withBytesPerChecksum(bytesPerChecksum)
.withCompression(compression)
.build();
- // The store file writer's path does not include the CF name, so we need
- // to configure the HFile writer directly.
- SchemaConfigured sc = (SchemaConfigured) w.writer;
- SchemaConfigured.resetSchemaMetricsConf(sc);
- passSchemaMetricsTo(sc);
return w;
}
@@ -1409,8 +1399,8 @@ public class HStore extends SchemaConfigured implements Store {
(forcemajor || isMajorCompaction(compactSelection.getFilesToCompact())) &&
(compactSelection.getFilesToCompact().size() < this.maxFilesToCompact
);
- LOG.debug(this.getHRegionInfo().getEncodedName() + " - " +
- this.getColumnFamilyName() + ": Initiating " +
+ LOG.debug(this.getHRegionInfo().getEncodedName() + " - "
+ + this.getColumnFamilyName() + ": Initiating " +
(majorcompaction ? "major" : "minor") + "compaction");
if (!majorcompaction &&
@@ -1523,7 +1513,6 @@ public class HStore extends SchemaConfigured implements Store {
storeFile = new StoreFile(this.fs, path, this.conf,
this.cacheConf, this.family.getBloomFilterType(),
NoOpDataBlockEncoder.INSTANCE);
- passSchemaMetricsTo(storeFile);
storeFile.createReader();
} catch (IOException e) {
LOG.error("Failed to open store file : " + path
@@ -1575,7 +1564,6 @@ public class HStore extends SchemaConfigured implements Store {
}
result = new StoreFile(this.fs, destPath, this.conf, this.cacheConf,
this.family.getBloomFilterType(), this.dataBlockEncoder);
- passSchemaMetricsTo(result);
result.createReader();
}
try {
@@ -1936,7 +1924,7 @@ public class HStore extends SchemaConfigured implements Store {
@Override
public String toString() {
- return getColumnFamilyName();
+ return this.getColumnFamilyName();
}
@Override
@@ -2125,9 +2113,8 @@ public class HStore extends SchemaConfigured implements Store {
}
public static final long FIXED_OVERHEAD =
- ClassSize.align(SchemaConfigured.SCHEMA_CONFIGURED_UNALIGNED_HEAP_SIZE +
- + (17 * ClassSize.REFERENCE) + (6 * Bytes.SIZEOF_LONG)
- + (5 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_BOOLEAN);
+ ClassSize.align((19 * ClassSize.REFERENCE) + (6 * Bytes.SIZEOF_LONG)
+ + (5 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_BOOLEAN);
public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD
+ ClassSize.OBJECT + ClassSize.REENTRANT_LOCK
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index 9300fc74fe4..2fcf4cd5c70 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -416,7 +416,6 @@ class MemStoreFlusher extends HasThread implements FlushRequester {
server.compactSplitThread.requestCompaction(region, getName());
}
- server.getMetrics().addFlush(region.getRecentFlushInfo());
} catch (DroppedSnapshotException ex) {
// Cache flush can fail in a few places. If it fails in a critical
// section, we get a DroppedSnapshotException and a replay of hlog
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegion.java
new file mode 100644
index 00000000000..70795305cb8
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegion.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.CompatibilityFactory;
+
+
+/**
+ * This is the glue between the HRegion and whatever hadoop shim layer
+ * is loaded (hbase-hadoop1-compat or hbase-hadoop2-compat).
+ */
+@InterfaceAudience.Private
+public class MetricsRegion {
+
+ private MetricsRegionSource source;
+
+ public MetricsRegion(MetricsRegionWrapper wrapper) {
+ source = CompatibilityFactory.getInstance(MetricsRegionServerSourceFactory.class)
+ .createRegion(wrapper);
+ }
+
+ public void close() {
+ source.close();
+ }
+
+ public void updatePut() {
+ source.updatePut();
+ }
+
+ public void updateDelete() {
+ source.updateDelete();
+ }
+
+ public void updateGet() {
+ source.updateGet();
+ }
+
+ public void updateAppend() {
+ source.updateAppend();
+ }
+
+ public void updateIncrement() {
+ source.updateIncrement();
+ }
+
+ MetricsRegionSource getSource() {
+ return source;
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
new file mode 100644
index 00000000000..3c84213e3c8
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+
+/**
+ * This class is for maintaining the various regionserver statistics
+ * and publishing them through the metrics interfaces.
+ *
+ * This class has a number of metrics variables that are publicly accessible;
+ * these variables (objects) have methods to update their values. All update
+ * methods delegate to the compat-layer {@code MetricsRegionServerSource}.
+ */
+@InterfaceStability.Evolving
+@InterfaceAudience.Private
+public class MetricsRegionServer {
+  // Loggers should be static and keyed on the class, not looked up per
+  // instance via this.getClass() (findbugs: non-static logger).
+  private static final Log LOG = LogFactory.getLog(MetricsRegionServer.class);
+
+  // Both fields are assigned exactly once in the constructor; final for
+  // safe publication and to document the invariant.
+  private final MetricsRegionServerSource serverSource;
+  private final MetricsRegionServerWrapper regionServerWrapper;
+
+  /**
+   * @param regionServerWrapper read-only view over the region server's stats
+   *                            that the metrics source polls.
+   */
+  public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper) {
+    this.regionServerWrapper = regionServerWrapper;
+    serverSource =
+        CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)
+            .createServer(regionServerWrapper);
+  }
+
+  // for unit-test usage
+  public MetricsRegionServerSource getMetricsSource() {
+    return serverSource;
+  }
+
+  public MetricsRegionServerWrapper getRegionServerWrapper() {
+    return regionServerWrapper;
+  }
+
+  /** @param t put latency in milliseconds */
+  public void updatePut(long t) {
+    serverSource.updatePut(t);
+  }
+
+  /** @param t delete latency in milliseconds */
+  public void updateDelete(long t) {
+    serverSource.updateDelete(t);
+  }
+
+  /** @param t get latency in milliseconds */
+  public void updateGet(long t) {
+    serverSource.updateGet(t);
+  }
+
+  /** @param t increment latency in milliseconds */
+  public void updateIncrement(long t) {
+    serverSource.updateIncrement(t);
+  }
+
+  /** @param t append latency in milliseconds */
+  public void updateAppend(long t) {
+    serverSource.updateAppend(t);
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
new file mode 100644
index 00000000000..22f7af7e2be
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
@@ -0,0 +1,395 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.HDFSBlocksDistribution;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.io.hfile.CacheStats;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.metrics2.MetricsExecutor;
+
+import java.util.Collection;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Impl for exposing HRegionServer Information through Hadoop's metrics 2 system.
+ *
+ * <p>Cheap values are read straight from the region server on demand; the
+ * expensive per-region/per-store aggregates are recomputed in the background
+ * every {@link #PERIOD} seconds by {@link RegionServerMetricsWrapperRunnable}
+ * and published through volatile fields so metrics readers never see a
+ * half-computed snapshot.
+ */
+@InterfaceAudience.Private
+class MetricsRegionServerWrapperImpl
+    implements MetricsRegionServerWrapper {
+
+  public static final Log LOG = LogFactory.getLog(MetricsRegionServerWrapperImpl.class);
+
+  /** Seconds between background recomputations of the aggregate values. */
+  public static final int PERIOD = 15;
+
+  private final HRegionServer regionServer;
+  private final BlockCache blockCache;
+
+  // Aggregates below are written only by the executor thread (inside the
+  // runnable) and read by metrics threads; volatile gives the required
+  // visibility without locking on the read path.
+  private volatile long numStores = 0;
+  private volatile long numStoreFiles = 0;
+  private volatile long memstoreSize = 0;
+  private volatile long storeFileSize = 0;
+  private volatile double requestsPerSecond = 0.0;
+  private volatile long readRequestsCount = 0;
+  private volatile long writeRequestsCount = 0;
+  private volatile long checkAndMutateChecksFailed = 0;
+  private volatile long checkAndMutateChecksPassed = 0;
+  private volatile long storefileIndexSize = 0;
+  private volatile long totalStaticIndexSize = 0;
+  private volatile long totalStaticBloomSize = 0;
+  private volatile long numPutsWithoutWAL = 0;
+  private volatile long dataInMemoryWithoutWAL = 0;
+  private volatile int percentFileLocal = 0;
+
+  private CacheStats cacheStats;
+  private ScheduledExecutorService executor;
+  private Runnable runnable;
+
+  /**
+   * Schedules the periodic recomputation on the shared metrics executor.
+   *
+   * @param regionServer the server whose stats are exposed; must have its
+   *                     cacheConfig initialized before this is constructed.
+   */
+  public MetricsRegionServerWrapperImpl(final HRegionServer regionServer) {
+    this.regionServer = regionServer;
+    this.blockCache = this.regionServer.cacheConfig.getBlockCache();
+    this.cacheStats = blockCache.getStats();
+    this.executor = CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor();
+    this.runnable = new RegionServerMetricsWrapperRunnable();
+    this.executor.scheduleWithFixedDelay(this.runnable, PERIOD, PERIOD, TimeUnit.SECONDS);
+  }
+
+  @Override
+  public String getClusterId() {
+    return regionServer.getClusterId();
+  }
+
+  @Override
+  public long getStartCode() {
+    return regionServer.getStartcode();
+  }
+
+  @Override
+  public String getZookeeperQuorum() {
+    // Empty string rather than null so metrics consumers never NPE.
+    ZooKeeperWatcher zk = regionServer.getZooKeeperWatcher();
+    if (zk == null) {
+      return "";
+    }
+    return zk.getQuorum();
+  }
+
+  @Override
+  public String getCoprocessors() {
+    String[] coprocessors = regionServer.getCoprocessors();
+    if (coprocessors == null || coprocessors.length == 0) {
+      return "";
+    }
+    return StringUtils.join(coprocessors, ", ");
+  }
+
+  @Override
+  public String getServerName() {
+    ServerName serverName = regionServer.getServerName();
+    if (serverName == null) {
+      return "";
+    }
+    return serverName.getServerName();
+  }
+
+  @Override
+  public long getNumOnlineRegions() {
+    Collection onlineRegionsLocalContext = regionServer.getOnlineRegionsLocalContext();
+    if (onlineRegionsLocalContext == null) {
+      return 0;
+    }
+    return onlineRegionsLocalContext.size();
+  }
+
+  @Override
+  public long getTotalRequestCount() {
+    return regionServer.requestCount.get();
+  }
+
+  @Override
+  public int getCompactionQueueSize() {
+    //The thread could be zero. if so assume there is no queue.
+    if (this.regionServer.compactSplitThread == null) {
+      return 0;
+    }
+    return this.regionServer.compactSplitThread.getCompactionQueueSize();
+  }
+
+  @Override
+  public int getFlushQueueSize() {
+    //If there is no flusher there should be no queue.
+    if (this.regionServer.cacheFlusher == null) {
+      return 0;
+    }
+    return this.regionServer.cacheFlusher.getFlushQueueSize();
+  }
+
+  // Block cache values are read live (they are cheap); all guard against a
+  // server configured without a block cache.
+
+  @Override
+  public long getBlockCacheCount() {
+    if (this.blockCache == null) {
+      return 0;
+    }
+    return this.blockCache.size();
+  }
+
+  @Override
+  public long getBlockCacheSize() {
+    if (this.blockCache == null) {
+      return 0;
+    }
+    return this.blockCache.getCurrentSize();
+  }
+
+  @Override
+  public long getBlockCacheFreeSize() {
+    if (this.blockCache == null) {
+      return 0;
+    }
+    return this.blockCache.getFreeSize();
+  }
+
+  @Override
+  public long getBlockCacheHitCount() {
+    if (this.cacheStats == null) {
+      return 0;
+    }
+    return this.cacheStats.getHitCount();
+  }
+
+  @Override
+  public long getBlockCacheMissCount() {
+    if (this.cacheStats == null) {
+      return 0;
+    }
+    return this.cacheStats.getMissCount();
+  }
+
+  @Override
+  public long getBlockCacheEvictedCount() {
+    if (this.cacheStats == null) {
+      return 0;
+    }
+    return this.cacheStats.getEvictedCount();
+  }
+
+  @Override
+  public int getBlockCacheHitPercent() {
+    if (this.cacheStats == null) {
+      return 0;
+    }
+    // Ratio in [0,1] scaled to an integer percent; fraction is truncated.
+    return (int) (this.cacheStats.getHitRatio() * 100);
+  }
+
+  @Override
+  public int getBlockCacheHitCachingPercent() {
+    if (this.cacheStats == null) {
+      return 0;
+    }
+    return (int) (this.cacheStats.getHitCachingRatio() * 100);
+  }
+
+  // Runs the recomputation synchronously on the caller's thread (used by
+  // tests that cannot wait for the scheduled PERIOD to elapse).
+  @Override public void forceRecompute() {
+    this.runnable.run();
+  }
+
+  // Getters below return the most recent snapshot computed by the runnable.
+
+  @Override
+  public long getNumStores() {
+    return numStores;
+  }
+
+  @Override
+  public long getNumStoreFiles() {
+    return numStoreFiles;
+  }
+
+  @Override
+  public long getMemstoreSize() {
+    return memstoreSize;
+  }
+
+  @Override
+  public long getStoreFileSize() {
+    return storeFileSize;
+  }
+
+  @Override public double getRequestsPerSecond() {
+    return requestsPerSecond;
+  }
+
+  @Override
+  public long getReadRequestsCount() {
+    return readRequestsCount;
+  }
+
+  @Override
+  public long getWriteRequestsCount() {
+    return writeRequestsCount;
+  }
+
+  @Override
+  public long getCheckAndMutateChecksFailed() {
+    return checkAndMutateChecksFailed;
+  }
+
+  @Override
+  public long getCheckAndMutateChecksPassed() {
+    return checkAndMutateChecksPassed;
+  }
+
+  @Override
+  public long getStoreFileIndexSize() {
+    return storefileIndexSize;
+  }
+
+  @Override
+  public long getTotalStaticIndexSize() {
+    return totalStaticIndexSize;
+  }
+
+  @Override
+  public long getTotalStaticBloomSize() {
+    return totalStaticBloomSize;
+  }
+
+  @Override
+  public long getNumPutsWithoutWAL() {
+    return numPutsWithoutWAL;
+  }
+
+  @Override
+  public long getDataInMemoryWithoutWAL() {
+    return dataInMemoryWithoutWAL;
+  }
+
+  @Override
+  public int getPercentFileLocal() {
+    return percentFileLocal;
+  }
+
+  @Override
+  public long getUpdatesBlockedTime() {
+    if (this.regionServer.cacheFlusher == null) {
+      return 0;
+    }
+    return this.regionServer.cacheFlusher.getUpdatesBlockedMsHighWater().get();
+  }
+
+
+  /**
+   * This is the runnable that will be executed on the executor every PERIOD number of seconds
+   * It will take metrics/numbers from all of the regions and use them to compute point in
+   * time metrics.
+   *
+   * <p>All work happens on local temporaries which are copied into the outer
+   * class's volatile fields in one pass at the end, so readers never observe
+   * a partially-updated snapshot. run() is synchronized so forceRecompute()
+   * cannot interleave with a scheduled execution.
+   */
+  public class RegionServerMetricsWrapperRunnable implements Runnable {
+
+    // State carried between runs to turn the monotonically-increasing
+    // request counter into a rate.
+    private long lastRan = 0;
+    private long lastRequestCount = 0;
+
+    @Override
+    synchronized public void run() {
+
+      // Refresh cache stats in case the block cache was swapped out.
+      cacheStats = blockCache.getStats();
+
+      HDFSBlocksDistribution hdfsBlocksDistribution =
+          new HDFSBlocksDistribution();
+
+      long tempNumStores = 0;
+      long tempNumStoreFiles = 0;
+      long tempMemstoreSize = 0;
+      long tempStoreFileSize = 0;
+      long tempReadRequestsCount = 0;
+      long tempWriteRequestsCount = 0;
+      long tempCheckAndMutateChecksFailed = 0;
+      long tempCheckAndMutateChecksPassed = 0;
+      long tempStorefileIndexSize = 0;
+      long tempTotalStaticIndexSize = 0;
+      long tempTotalStaticBloomSize = 0;
+      long tempNumPutsWithoutWAL = 0;
+      long tempDataInMemoryWithoutWAL = 0;
+      int tempPercentFileLocal = 0;
+
+
+      // Sum per-region and per-store counters across all online regions.
+      for (HRegion r : regionServer.getOnlineRegionsLocalContext()) {
+        tempNumPutsWithoutWAL += r.numPutsWithoutWAL.get();
+        tempDataInMemoryWithoutWAL += r.dataInMemoryWithoutWAL.get();
+        tempReadRequestsCount += r.readRequestsCount.get();
+        tempWriteRequestsCount += r.writeRequestsCount.get();
+        tempCheckAndMutateChecksFailed += r.checkAndMutateChecksFailed.get();
+        tempCheckAndMutateChecksPassed += r.checkAndMutateChecksPassed.get();
+        tempNumStores += r.stores.size();
+        for (Store store : r.stores.values()) {
+          tempNumStoreFiles += store.getStorefilesCount();
+          tempMemstoreSize += store.getMemStoreSize();
+          tempStoreFileSize += store.getStorefilesSize();
+          tempStorefileIndexSize += store.getStorefilesIndexSize();
+          tempTotalStaticBloomSize += store.getTotalStaticBloomSize();
+          tempTotalStaticIndexSize += store.getTotalStaticIndexSize();
+        }
+
+        hdfsBlocksDistribution.add(r.getHDFSBlocksDistribution());
+      }
+
+      // Fraction of HDFS blocks local to this host, as an integer percent.
+      float localityIndex = hdfsBlocksDistribution.getBlockLocalityIndex(
+          regionServer.getServerName().getHostname());
+      tempPercentFileLocal = (int) (localityIndex * 100);
+
+
+      //Compute the number of requests per second
+      long currentTime = EnvironmentEdgeManager.currentTimeMillis();
+
+      // assume that it took PERIOD seconds to start the executor.
+      // this is a guess but it's a pretty good one.
+      if (lastRan == 0) {
+        lastRan = currentTime - (PERIOD*1000);
+      }
+
+
+      // Only recompute the rate when a meaningful interval (> 10 ms) has
+      // elapsed; if the clock stalled or went backwards, keep the previous
+      // requestsPerSecond to avoid dividing by ~zero or a negative interval.
+      if ((currentTime - lastRan) > 10) {
+        long currentRequestCount = getTotalRequestCount();
+        requestsPerSecond = (currentRequestCount - lastRequestCount) / ((currentTime - lastRan) / 1000.0);
+        lastRequestCount = currentRequestCount;
+      }
+      lastRan = currentTime;
+
+      //Copy over computed values so that no thread sees half computed values.
+      numStores = tempNumStores;
+      numStoreFiles = tempNumStoreFiles;
+      memstoreSize = tempMemstoreSize;
+      storeFileSize = tempStoreFileSize;
+      readRequestsCount = tempReadRequestsCount;
+      writeRequestsCount = tempWriteRequestsCount;
+      checkAndMutateChecksFailed = tempCheckAndMutateChecksFailed;
+      checkAndMutateChecksPassed = tempCheckAndMutateChecksPassed;
+      storefileIndexSize = tempStorefileIndexSize;
+      totalStaticIndexSize = tempTotalStaticIndexSize;
+      totalStaticBloomSize = tempTotalStaticBloomSize;
+      numPutsWithoutWAL = tempNumPutsWithoutWAL;
+      dataInMemoryWithoutWAL = tempDataInMemoryWithoutWAL;
+      percentFileLocal = tempPercentFileLocal;
+    }
+  }
+
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
new file mode 100644
index 00000000000..64d570dcac4
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.metrics2.MetricsExecutor;
+
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Impl for exposing a single HRegion's stats through a MetricsRegionWrapper.
+ *
+ * <p>The expensive per-store aggregates are recomputed by a background task
+ * every {@link #PERIOD} seconds; cheap values are read live from the region.
+ */
+public class MetricsRegionWrapperImpl implements MetricsRegionWrapper {
+
+  /** Seconds between background recomputations of the store aggregates. */
+  public static final int PERIOD = 45;
+
+  private final HRegion region;
+  private ScheduledExecutorService executor;
+  private Runnable runnable;
+  // Written by the executor thread, read by metrics threads: these must be
+  // volatile for visibility, matching the identical publish pattern in
+  // MetricsRegionServerWrapperImpl.
+  private volatile long numStoreFiles;
+  private volatile long memstoreSize;
+  private volatile long storeFileSize;
+
+  /**
+   * Schedules the periodic recomputation on the shared metrics executor.
+   *
+   * @param region the region whose stats are exposed.
+   */
+  public MetricsRegionWrapperImpl(HRegion region) {
+    this.region = region;
+    this.executor = CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor();
+    this.runnable = new HRegionMetricsWrapperRunnable();
+    this.executor.scheduleWithFixedDelay(this.runnable, PERIOD, PERIOD, TimeUnit.SECONDS);
+  }
+
+  @Override
+  public String getTableName() {
+    return this.region.getTableDesc().getNameAsString();
+  }
+
+  @Override
+  public String getRegionName() {
+    return this.region.getRegionInfo().getEncodedName();
+  }
+
+  @Override
+  public long getNumStores() {
+    return this.region.stores.size();
+  }
+
+  // The three getters below return the most recent background snapshot.
+
+  @Override
+  public long getNumStoreFiles() {
+    return numStoreFiles;
+  }
+
+  @Override
+  public long getMemstoreSize() {
+    return memstoreSize;
+  }
+
+  @Override
+  public long getStoreFileSize() {
+    return storeFileSize;
+  }
+
+  @Override
+  public long getReadRequestCount() {
+    return this.region.getReadRequestsCount();
+  }
+
+  @Override
+  public long getWriteRequestCount() {
+    return this.region.getWriteRequestsCount();
+  }
+
+  /**
+   * Aggregates the per-store counters into local temporaries, then copies
+   * them into the volatile fields in one pass so readers never observe a
+   * half-computed snapshot.
+   */
+  public class HRegionMetricsWrapperRunnable implements Runnable {
+
+    @Override
+    public void run() {
+      long tempNumStoreFiles = 0;
+      long tempMemstoreSize = 0;
+      long tempStoreFileSize = 0;
+
+      for (Store store : region.stores.values()) {
+        tempNumStoreFiles += store.getStorefilesCount();
+        tempMemstoreSize += store.getMemStoreSize();
+        tempStoreFileSize += store.getStorefilesSize();
+      }
+
+      numStoreFiles = tempNumStoreFiles;
+      memstoreSize = tempMemstoreSize;
+      storeFileSize = tempStoreFileSize;
+    }
+  }
+
+}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
index f945ffd277f..0b5a3162339 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
@@ -66,7 +66,6 @@ class SplitRequest implements Runnable {
if (!st.prepare()) return;
try {
st.execute(this.server, this.server);
- this.server.getMetrics().incrementSplitSuccessCount(System.currentTimeMillis() - startTime);
} catch (Exception e) {
if (this.server.isStopping() || this.server.isStopped()) {
LOG.info(
@@ -81,7 +80,6 @@ class SplitRequest implements Runnable {
if (st.rollback(this.server, this.server)) {
LOG.info("Successful rollback of failed split of " +
parent.getRegionNameAsString());
- this.server.getMetrics().incrementSplitFailureCount();
} else {
this.server.abort("Abort; we got an error after point-of-no-return");
}
@@ -102,7 +100,6 @@ class SplitRequest implements Runnable {
} catch (IOException ex) {
LOG.error("Split failed " + this, RemoteExceptionHandler
.checkIOException(ex));
- this.server.getMetrics().incrementSplitFailureCount();
server.checkFileSystem();
} finally {
if (this.parent.getCoprocessorHost() != null) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index d391a16df74..1841eebf323 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.SchemaAware;
import com.google.common.collect.ImmutableList;
@@ -42,7 +41,7 @@ import com.google.common.collect.ImmutableList;
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
-public interface Store extends SchemaAware, HeapSize {
+public interface Store extends HeapSize {
/* The default priority for user-specified compaction requests.
* The user gets top priority unless we have blocking compactions. (Pri <= 0)
@@ -287,4 +286,8 @@ public interface Store extends SchemaAware, HeapSize {
* @return the parent region hosting this store
*/
public HRegion getHRegion();
+
+ public String getColumnFamilyName();
+
+ public String getTableName();
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 203aecf50d6..a240f94c9ac 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -58,8 +58,6 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.HFileWriterV1;
import org.apache.hadoop.hbase.io.hfile.HFileWriterV2;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
import org.apache.hadoop.hbase.util.ChecksumType;
@@ -80,7 +78,7 @@ import com.google.common.collect.Ordering;
/**
* A Store data file. Stores usually have one or more of these files. They
* are produced by flushing the memstore to disk. To
- * create, instantiate a writer using {@link StoreFile#WriterBuilder}
+ * create, instantiate a writer using {@link StoreFile.WriterBuilder}
* and append data. Be sure to add any metadata before calling close on the
* Writer (Use the appendMetadata convenience methods). On close, a StoreFile
* is sitting in the Filesystem. To refer to it, create a StoreFile instance
@@ -91,7 +89,7 @@ import com.google.common.collect.Ordering;
* writer and a reader is that we write once but read a lot more.
*/
@InterfaceAudience.LimitedPrivate("Coprocessor")
-public class StoreFile extends SchemaConfigured {
+public class StoreFile {
static final Log LOG = LogFactory.getLog(StoreFile.class.getName());
public static enum BloomType {
@@ -277,7 +275,6 @@ public class StoreFile extends SchemaConfigured {
this.modificationTimeStamp = 0;
}
- SchemaMetrics.configureGlobally(conf);
}
/**
@@ -545,11 +542,6 @@ public class StoreFile extends SchemaConfigured {
dataBlockEncoder.getEncodingInCache());
}
- if (isSchemaConfigured()) {
- SchemaConfigured.resetSchemaMetricsConf(reader);
- passSchemaMetricsTo(reader);
- }
-
computeHDFSBlockDistribution();
// Load up indices and fileinfo. This also loads Bloom filter type.
@@ -1287,7 +1279,7 @@ public class StoreFile extends SchemaConfigured {
/**
* Reader for a StoreFile.
*/
- public static class Reader extends SchemaConfigured {
+ public static class Reader {
static final Log LOG = LogFactory.getLog(Reader.class.getName());
protected BloomFilter generalBloomFilter = null;
@@ -1301,7 +1293,6 @@ public class StoreFile extends SchemaConfigured {
public Reader(FileSystem fs, Path path, CacheConfig cacheConf,
DataBlockEncoding preferredEncodingInCache) throws IOException {
- super(path);
reader = HFile.createReaderWithEncoding(fs, path, cacheConf,
preferredEncodingInCache);
bloomFilterType = BloomType.NONE;
@@ -1310,7 +1301,6 @@ public class StoreFile extends SchemaConfigured {
public Reader(FileSystem fs, Path path, HFileLink hfileLink, long size,
CacheConfig cacheConf, DataBlockEncoding preferredEncodingInCache,
boolean closeIStream) throws IOException {
- super(path);
FSDataInputStream in = hfileLink.open(fs);
FSDataInputStream inNoChecksum = in;
@@ -1584,7 +1574,6 @@ public class StoreFile extends SchemaConfigured {
&& bloomFilter.contains(key, 0, key.length, bloom);
}
- getSchemaMetrics().updateBloomMetrics(exists);
return exists;
}
} catch (IOException e) {
@@ -1728,10 +1717,6 @@ public class StoreFile extends SchemaConfigured {
return reader.indexSize();
}
- public String getColumnFamilyName() {
- return reader.getColumnFamilyName();
- }
-
public BloomType getBloomFilterType() {
return this.bloomFilterType;
}
@@ -1774,11 +1759,6 @@ public class StoreFile extends SchemaConfigured {
public long getMaxTimestamp() {
return timeRangeTracker.maximumTimestamp;
}
-
- @Override
- public void schemaConfigurationChanged() {
- passSchemaMetricsTo((SchemaConfigured) reader);
- }
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index b595c066bac..4ccdbccff36 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -33,8 +33,6 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.regionserver.HStore.ScanInfo;
-import org.apache.hadoop.hbase.regionserver.metrics.RegionMetricsStorage;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -110,7 +108,6 @@ public class StoreScanner extends NonLazyKeyValueScanner
throws IOException {
this(store, scan.getCacheBlocks(), scan, columns, scanInfo.getTtl(),
scanInfo.getMinVersions());
- initializeMetricNames();
if (columns != null && scan.isRaw()) {
throw new DoNotRetryIOException(
"Cannot specify any column for a raw scan");
@@ -163,7 +160,6 @@ public class StoreScanner extends NonLazyKeyValueScanner
long smallestReadPoint, long earliestPutTs) throws IOException {
this(store, false, scan, null, scanInfo.getTtl(),
scanInfo.getMinVersions());
- initializeMetricNames();
matcher = new ScanQueryMatcher(scan, scanInfo, null, scanType,
smallestReadPoint, earliestPutTs, oldestUnexpiredTS);
@@ -194,7 +190,6 @@ public class StoreScanner extends NonLazyKeyValueScanner
throws IOException {
this(null, scan.getCacheBlocks(), scan, columns, scanInfo.getTtl(),
scanInfo.getMinVersions());
- this.initializeMetricNames();
this.matcher = new ScanQueryMatcher(scan, scanInfo, columns, scanType,
Long.MAX_VALUE, earliestPutTs, oldestUnexpiredTS);
@@ -205,23 +200,6 @@ public class StoreScanner extends NonLazyKeyValueScanner
heap = new KeyValueHeap(scanners, scanInfo.getComparator());
}
- /**
- * Method used internally to initialize metric names throughout the
- * constructors.
- *
- * To be called after the store variable has been initialized!
- */
- private void initializeMetricNames() {
- String tableName = SchemaMetrics.UNKNOWN;
- String family = SchemaMetrics.UNKNOWN;
- if (store != null) {
- tableName = store.getTableName();
- family = Bytes.toString(store.getFamily().getName());
- }
- this.metricNamePrefix =
- SchemaMetrics.generateSchemaMetricsPrefix(tableName, family);
- }
-
/**
* Get a filtered list of scanners. Assumes we are not in a compaction.
* @return list of scanners to seek
@@ -458,8 +436,7 @@ public class StoreScanner extends NonLazyKeyValueScanner
}
} finally {
if (cumulativeMetric > 0 && metric != null) {
- RegionMetricsStorage.incrNumericMetric(this.metricNamePrefix + metric,
- cumulativeMetric);
+
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
index bee966800ca..80e4d5ee090 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
@@ -253,7 +253,6 @@ public class CompactionRequest implements Comparable,
LOG.info(((completed) ? "completed" : "aborted") + " compaction: " +
this + "; duration=" + StringUtils.formatTimeDiff(now, start));
if (completed) {
- server.getMetrics().addCompaction(now - start, this.totalSize);
// degenerate case: blocked regions require recursive enqueues
if (s.getCompactPriority() <= 0) {
server.compactSplitThread
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/OperationMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/OperationMetrics.java
deleted file mode 100644
index bff18c33b13..00000000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/OperationMetrics.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hadoop.hbase.regionserver.metrics;
-
-import java.util.Set;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.client.Append;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Increment;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * This class provides a simplified interface to expose time varying metrics
- * about GET/DELETE/PUT/ICV operations on a region and on Column Families. All
- * metrics are stored in {@link RegionMetricsStorage} and exposed to hadoop
- * metrics through {@link RegionServerDynamicMetrics}.
- */
-@InterfaceAudience.Private
-public class OperationMetrics {
-
- private static final String DELETE_KEY = "delete_";
- private static final String PUT_KEY = "put_";
- private static final String GET_KEY = "get_";
- private static final String ICV_KEY = "incrementColumnValue_";
- private static final String INCREMENT_KEY = "increment_";
- private static final String MULTIPUT_KEY = "multiput_";
- private static final String MULTIDELETE_KEY = "multidelete_";
- private static final String APPEND_KEY = "append_";
-
- /** Conf key controlling whether we should expose metrics.*/
- private static final String CONF_KEY =
- "hbase.metrics.exposeOperationTimes";
-
- private String tableName = null;
- private String regionName = null;
- private String regionMetrixPrefix = null;
- private Configuration conf = null;
-
-
- /**
- * Create a new OperationMetrics
- * @param conf The Configuration of the HRegion reporting operations coming in.
- * @param regionInfo The region info
- */
- public OperationMetrics(Configuration conf, HRegionInfo regionInfo) {
- // Configure SchemaMetrics before trying to create a RegionOperationMetrics instance as
- // RegionOperationMetrics relies on SchemaMetrics to do naming.
- if (conf != null) {
- SchemaMetrics.configureGlobally(conf);
-
- this.conf = conf;
- if (regionInfo != null) {
- this.tableName = regionInfo.getTableNameAsString();
- this.regionName = regionInfo.getEncodedName();
- } else {
- this.tableName = SchemaMetrics.UNKNOWN;
- this.regionName = SchemaMetrics.UNKNOWN;
- }
- this.regionMetrixPrefix =
- SchemaMetrics.generateRegionMetricsPrefix(this.tableName, this.regionName);
- }
- }
-
- /**
- * This is used in creating a testing HRegion where the regionInfo is unknown
- * @param conf
- */
- public OperationMetrics() {
- this(null, null);
- }
-
-
- /**
- * Update the stats associated with {@link HTable#put(java.util.List)}.
- *
- * @param columnFamilies Set of CF's this multiput is associated with
- * @param value the time
- */
- public void updateMultiPutMetrics(Set columnFamilies, long value) {
- doUpdateTimeVarying(columnFamilies, MULTIPUT_KEY, value);
- }
-
- /**
- * Update the stats associated with {@link HTable#delete(java.util.List)}.
- *
- * @param columnFamilies Set of CF's this multidelete is associated with
- * @param value the time
- */
- public void updateMultiDeleteMetrics(Set columnFamilies, long value) {
- doUpdateTimeVarying(columnFamilies, MULTIDELETE_KEY, value);
- }
-
- /**
- * Update the metrics associated with a {@link Get}
- *
- * @param columnFamilies
- * Set of Column Families in this get.
- * @param value
- * the time
- */
- public void updateGetMetrics(Set columnFamilies, long value) {
- doUpdateTimeVarying(columnFamilies, GET_KEY, value);
- }
-
- /**
- * Update metrics associated with an {@link Increment}
- * @param columnFamilies
- * @param value
- */
- public void updateIncrementMetrics(Set columnFamilies, long value) {
- doUpdateTimeVarying(columnFamilies, INCREMENT_KEY, value);
- }
-
-
- /**
- * Update the metrics associated with an {@link Append}
- * @param columnFamilies
- * @param value
- */
- public void updateAppendMetrics(Set columnFamilies, long value) {
- doUpdateTimeVarying(columnFamilies, APPEND_KEY, value);
- }
-
-
- /**
- * Update the metrics associated with
- * {@link HTable#incrementColumnValue(byte[], byte[], byte[], long)}
- *
- * @param columnFamily
- * The single column family associated with an ICV
- * @param value
- * the time
- */
- public void updateIncrementColumnValueMetrics(byte[] columnFamily, long value) {
- String cfMetricPrefix =
- SchemaMetrics.generateSchemaMetricsPrefix(this.tableName, Bytes.toString(columnFamily));
- doSafeIncTimeVarying(cfMetricPrefix, ICV_KEY, value);
- doSafeIncTimeVarying(this.regionMetrixPrefix, ICV_KEY, value);
- }
-
- /**
- * update metrics associated with a {@link Put}
- *
- * @param columnFamilies
- * Set of column families involved.
- * @param value
- * the time.
- */
- public void updatePutMetrics(Set columnFamilies, long value) {
- doUpdateTimeVarying(columnFamilies, PUT_KEY, value);
- }
-
- /**
- * update metrics associated with a {@link Delete}
- *
- * @param columnFamilies
- * @param value
- * the time.
- */
- public void updateDeleteMetrics(Set columnFamilies, long value) {
- doUpdateTimeVarying(columnFamilies, DELETE_KEY, value);
- }
-
- /**
- * This deletes all old metrics this instance has ever created or updated.
- */
- public void closeMetrics() {
- RegionMetricsStorage.clear();
- }
-
- /**
- * Method to send updates for cf and region metrics. This is the normal method
- * used if the naming of stats and CF's are in line with put/delete/multiput.
- *
- * @param columnFamilies
- * the set of column families involved.
- * @param key
- * the metric name.
- * @param value
- * the time.
- */
- private void doUpdateTimeVarying(Set columnFamilies, String key, long value) {
- String cfPrefix = null;
- if (columnFamilies != null) {
- cfPrefix = SchemaMetrics.generateSchemaMetricsPrefix(tableName, columnFamilies);
- } else {
- cfPrefix = SchemaMetrics.generateSchemaMetricsPrefix(tableName, SchemaMetrics.UNKNOWN);
- }
-
- doSafeIncTimeVarying(cfPrefix, key, value);
- doSafeIncTimeVarying(this.regionMetrixPrefix, key, value);
- }
-
- private void doSafeIncTimeVarying(String prefix, String key, long value) {
- if (conf.getBoolean(CONF_KEY, true)) {
- if (prefix != null && !prefix.isEmpty() && key != null && !key.isEmpty()) {
- String m = prefix + key;
- RegionMetricsStorage.incrTimeVaryingMetric(m, value);
- }
- }
- }
-
-}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionMetricsStorage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionMetricsStorage.java
deleted file mode 100644
index 5d4beffc2e2..00000000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionMetricsStorage.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver.metrics;
-
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.util.Pair;
-
-/**
- * This class if for maintaining the maps used to power metrics for hfiles,
- * regions, and regionservers. It has methods to mutate and get state of metrics
- * numbers. These numbers are exposed to Hadoop metrics through
- * RegionServerDynamicMetrics.
- */
-@InterfaceAudience.Private
-public class RegionMetricsStorage {
-
- // for simple numeric metrics (# of blocks read from block cache)
- private static final ConcurrentMap numericMetrics =
- new ConcurrentHashMap();
-
- // for simple numeric metrics (current block cache size)
- // These ones are not reset to zero when queried, unlike the previous.
- private static final ConcurrentMap numericPersistentMetrics =
- new ConcurrentHashMap();
-
- /**
- * Used for metrics where we want track a metrics (such as latency) over a
- * number of operations.
- */
- private static final ConcurrentMap> timeVaryingMetrics =
- new ConcurrentHashMap>();
-
- public static Map getNumericMetrics() {
- return numericMetrics;
- }
-
- public static Map getNumericPersistentMetrics() {
- return numericPersistentMetrics;
- }
-
- public static Map> getTimeVaryingMetrics() {
- return timeVaryingMetrics;
- }
-
- public static void incrNumericMetric(String key, long amount) {
- AtomicLong oldVal = numericMetrics.get(key);
- if (oldVal == null) {
- oldVal = numericMetrics.putIfAbsent(key, new AtomicLong(amount));
- if (oldVal == null)
- return;
- }
- oldVal.addAndGet(amount);
- }
-
- public static void incrTimeVaryingMetric(String key, long amount) {
- Pair oldVal = timeVaryingMetrics.get(key);
- if (oldVal == null) {
- oldVal =
- timeVaryingMetrics.putIfAbsent(key,
- new Pair(
- new AtomicLong(amount),
- new AtomicInteger(1)));
- if (oldVal == null)
- return;
- }
- oldVal.getFirst().addAndGet(amount); // total time
- oldVal.getSecond().incrementAndGet(); // increment ops by 1
- }
-
- public static void incrNumericPersistentMetric(String key, long amount) {
- AtomicLong oldVal = numericPersistentMetrics.get(key);
- if (oldVal == null) {
- oldVal = numericPersistentMetrics.putIfAbsent(key, new AtomicLong(amount));
- if (oldVal == null)
- return;
- }
- oldVal.addAndGet(amount);
- }
-
- public static void setNumericMetric(String key, long amount) {
- numericMetrics.put(key, new AtomicLong(amount));
- }
-
- public static long getNumericMetric(String key) {
- AtomicLong m = numericMetrics.get(key);
- if (m == null)
- return 0;
- return m.get();
- }
-
- public static Pair getTimeVaryingMetric(String key) {
- Pair pair = timeVaryingMetrics.get(key);
- if (pair == null) {
- return new Pair(0L, 0);
- }
-
- return new Pair(pair.getFirst().get(), pair.getSecond().get());
- }
-
- public static long getNumericPersistentMetric(String key) {
- AtomicLong m = numericPersistentMetrics.get(key);
- if (m == null)
- return 0;
- return m.get();
- }
-
- /**
- * Clear all copies of the metrics this stores.
- */
- public static void clear() {
- timeVaryingMetrics.clear();
- numericMetrics.clear();
- numericPersistentMetrics.clear();
- }
-}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java
deleted file mode 100644
index bb06a10d3f4..00000000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java
+++ /dev/null
@@ -1,230 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver.metrics;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.metrics.MetricsContext;
-import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.hadoop.metrics.MetricsUtil;
-import org.apache.hadoop.metrics.Updater;
-import org.apache.hadoop.metrics.util.MetricsBase;
-import org.apache.hadoop.metrics.util.MetricsLongValue;
-import org.apache.hadoop.metrics.util.MetricsRegistry;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
-
-/**
- *
- * This class is for maintaining the various RPC statistics
- * and publishing them through the metrics interfaces.
- * This also registers the JMX MBean for RPC.
- *
- * This class has a number of metrics variables that are publicly accessible;
- * these variables (objects) have methods to update their values;
- * for example: rpcQueueTime.inc(time)
- *
- */
-@InterfaceAudience.Private
-public class RegionServerDynamicMetrics implements Updater {
- private static final String UNABLE_TO_CLEAR = "Unable to clear RegionServerDynamicMetrics";
-
- private MetricsRecord metricsRecord;
- private MetricsContext context;
- private final RegionServerDynamicStatistics rsDynamicStatistics;
- private Method updateMbeanInfoIfMetricsListChanged = null;
- private static final Log LOG =
- LogFactory.getLog(RegionServerDynamicStatistics.class);
-
- private boolean reflectionInitialized = false;
- private boolean needsUpdateMessage = false;
- private Field recordMetricMapField;
- private Field registryMetricMapField;
-
- /**
- * The metrics variables are public:
- * - they can be set directly by calling their set/inc methods
- * -they can also be read directly - e.g. JMX does this.
- */
- public final MetricsRegistry registry = new MetricsRegistry();
-
- private RegionServerDynamicMetrics() {
- this.context = MetricsUtil.getContext("hbase-dynamic");
- this.metricsRecord = MetricsUtil.createRecord(
- this.context,
- "RegionServerDynamicStatistics");
- context.registerUpdater(this);
- this.rsDynamicStatistics = new RegionServerDynamicStatistics(this.registry);
- try {
- updateMbeanInfoIfMetricsListChanged =
- this.rsDynamicStatistics.getClass().getSuperclass()
- .getDeclaredMethod("updateMbeanInfoIfMetricsListChanged",
- new Class[]{});
- updateMbeanInfoIfMetricsListChanged.setAccessible(true);
- } catch (Exception e) {
- LOG.error(e);
- }
- }
-
- public static RegionServerDynamicMetrics newInstance() {
- RegionServerDynamicMetrics metrics =
- new RegionServerDynamicMetrics();
- return metrics;
- }
-
- public synchronized void setNumericMetric(String name, long amt) {
- MetricsLongValue m = (MetricsLongValue)registry.get(name);
- if (m == null) {
- m = new MetricsLongValue(name, this.registry);
- this.needsUpdateMessage = true;
- }
- m.set(amt);
- }
-
- public synchronized void incrTimeVaryingMetric(
- String name,
- long amt,
- int numOps) {
- MetricsTimeVaryingRate m = (MetricsTimeVaryingRate)registry.get(name);
- if (m == null) {
- m = new MetricsTimeVaryingRate(name, this.registry);
- this.needsUpdateMessage = true;
- }
- if (numOps > 0) {
- m.inc(numOps, amt);
- }
- }
-
- /**
- * Clear all metrics this exposes.
- * Uses reflection to clear them from hadoop metrics side as well.
- */
- @SuppressWarnings("rawtypes")
- public void clear() {
- this.needsUpdateMessage = true;
- // If this is the first clear use reflection to get the two maps that hold copies of our
- // metrics on the hadoop metrics side. We have to use reflection because there is not
- // remove metrics on the hadoop side. If we can't get them then clearing old metrics
- // is not possible and bailing out early is our best option.
- if (!this.reflectionInitialized) {
- this.reflectionInitialized = true;
- try {
- this.recordMetricMapField = this.metricsRecord.getClass().getDeclaredField("metricTable");
- this.recordMetricMapField.setAccessible(true);
- } catch (SecurityException e) {
- LOG.debug(UNABLE_TO_CLEAR);
- return;
- } catch (NoSuchFieldException e) {
- LOG.debug(UNABLE_TO_CLEAR);
- return;
- }
-
- try {
- this.registryMetricMapField = this.registry.getClass().getDeclaredField("metricsList");
- this.registryMetricMapField.setAccessible(true);
- } catch (SecurityException e) {
- LOG.debug(UNABLE_TO_CLEAR);
- return;
- } catch (NoSuchFieldException e) {
- LOG.debug(UNABLE_TO_CLEAR);
- return;
- }
- }
-
-
- //If we found both fields then try and clear the maps.
- if (this.recordMetricMapField != null && this.registryMetricMapField != null) {
- try {
- Map recordMap = (Map) this.recordMetricMapField.get(this.metricsRecord);
- recordMap.clear();
- Map registryMap = (Map) this.registryMetricMapField.get(this.registry);
- registryMap.clear();
- } catch (IllegalArgumentException e) {
- LOG.debug(UNABLE_TO_CLEAR);
- } catch (IllegalAccessException e) {
- LOG.debug(UNABLE_TO_CLEAR);
- }
- } else {
- LOG.debug(UNABLE_TO_CLEAR);
- }
- }
-
- /**
- * Push the metrics to the monitoring subsystem on doUpdate() call.
- * @param context ctx
- */
- public void doUpdates(MetricsContext context) {
- /* get dynamically created numeric metrics, and push the metrics */
- for (Entry entry : RegionMetricsStorage.getNumericMetrics().entrySet()) {
- this.setNumericMetric(entry.getKey(), entry.getValue().getAndSet(0));
- }
- /* get dynamically created numeric metrics, and push the metrics.
- * These ones aren't to be reset; they are cumulative. */
- for (Entry entry : RegionMetricsStorage.getNumericPersistentMetrics().entrySet()) {
- this.setNumericMetric(entry.getKey(), entry.getValue().get());
- }
- /* get dynamically created time varying metrics, and push the metrics */
- for (Entry> entry :
- RegionMetricsStorage.getTimeVaryingMetrics().entrySet()) {
- Pair value = entry.getValue();
- this.incrTimeVaryingMetric(entry.getKey(),
- value.getFirst().getAndSet(0),
- value.getSecond().getAndSet(0));
- }
-
- // If there are new metrics sending this message to jmx tells it to update everything.
- // This is not ideal we should just move to metrics2 that has full support for dynamic metrics.
- if (needsUpdateMessage) {
- try {
- if (updateMbeanInfoIfMetricsListChanged != null) {
- updateMbeanInfoIfMetricsListChanged.invoke(this.rsDynamicStatistics,
- new Object[]{});
- }
- } catch (Exception e) {
- LOG.error(e);
- }
- needsUpdateMessage = false;
- }
-
-
- synchronized (registry) {
- // Iterate through the registry to propagate the different rpc metrics.
- for (String metricName : registry.getKeyList() ) {
- MetricsBase value = registry.get(metricName);
- value.pushMetric(metricsRecord);
- }
- }
- metricsRecord.update();
- }
-
- public void shutdown() {
- if (rsDynamicStatistics != null)
- rsDynamicStatistics.shutdown();
- }
-}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java
deleted file mode 100644
index b4df6a76e63..00000000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver.metrics;
-
-import javax.management.ObjectName;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.metrics.MetricsMBeanBase;
-import org.apache.hadoop.metrics.util.MBeanUtil;
-import org.apache.hadoop.metrics.util.MetricsRegistry;
-
-/**
- * Exports dynamic region server metric recorded in
- * {@link RegionServerDynamicMetrics} as an MBean
- * for JMX monitoring.
- */
-@InterfaceAudience.Private
-public class RegionServerDynamicStatistics extends MetricsMBeanBase {
- private final ObjectName mbeanName;
-
- public RegionServerDynamicStatistics(MetricsRegistry registry) {
- super(registry, "RegionServerDynamicStatistics");
- mbeanName = MBeanUtil.registerMBean("RegionServer", "RegionServerDynamicStatistics", this);
- }
-
- public void shutdown() {
- if (mbeanName != null)
- MBeanUtil.unregisterMBean(mbeanName);
- }
-
-}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java
deleted file mode 100644
index d8883e93062..00000000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java
+++ /dev/null
@@ -1,626 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver.metrics;
-
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryUsage;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.metrics.ExactCounterMetric;
-import org.apache.hadoop.hbase.metrics.HBaseInfo;
-import org.apache.hadoop.hbase.metrics.MetricsRate;
-import org.apache.hadoop.hbase.metrics.histogram.MetricsHistogram;
-import org.apache.hadoop.hbase.metrics.PersistentMetricsTimeVaryingRate;
-import com.yammer.metrics.stats.Snapshot;
-import org.apache.hadoop.hbase.regionserver.wal.HLogMetrics;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.Strings;
-import org.apache.hadoop.metrics.ContextFactory;
-import org.apache.hadoop.metrics.MetricsContext;
-import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.hadoop.metrics.MetricsUtil;
-import org.apache.hadoop.metrics.Updater;
-import org.apache.hadoop.metrics.jvm.JvmMetrics;
-import org.apache.hadoop.metrics.util.MetricsIntValue;
-import org.apache.hadoop.metrics.util.MetricsLongValue;
-import org.apache.hadoop.metrics.util.MetricsRegistry;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong;
-import org.apache.hadoop.util.StringUtils;
-
-/**
- * This class is for maintaining the various regionserver statistics
- * and publishing them through the metrics interfaces.
- *
- * This class has a number of metrics variables that are publicly accessible;
- * these variables (objects) have methods to update their values.
- */
-@InterfaceAudience.Private
-public class RegionServerMetrics implements Updater {
- @SuppressWarnings({"FieldCanBeLocal"})
- private final Log LOG = LogFactory.getLog(this.getClass());
- private final MetricsRecord metricsRecord;
- private long lastUpdate = System.currentTimeMillis();
- private long lastExtUpdate = System.currentTimeMillis();
- private long extendedPeriod = 0;
- private static final int MB = 1024*1024;
- private MetricsRegistry registry = new MetricsRegistry();
- private final RegionServerStatistics statistics;
-
- public final MetricsTimeVaryingRate atomicIncrementTime =
- new MetricsTimeVaryingRate("atomicIncrementTime", registry);
-
- /**
- * Count of regions carried by this regionserver
- */
- public final MetricsIntValue regions =
- new MetricsIntValue("regions", registry);
-
- /**
- * Block cache size.
- */
- public final MetricsLongValue blockCacheSize =
- new MetricsLongValue("blockCacheSize", registry);
-
- /**
- * Block cache free size.
- */
- public final MetricsLongValue blockCacheFree =
- new MetricsLongValue("blockCacheFree", registry);
-
- /**
- * Block cache item count.
- */
- public final MetricsLongValue blockCacheCount =
- new MetricsLongValue("blockCacheCount", registry);
-
- /**
- * Block cache hit count.
- */
- public final MetricsLongValue blockCacheHitCount =
- new MetricsLongValue("blockCacheHitCount", registry);
-
- /**
- * Block cache miss count.
- */
- public final MetricsLongValue blockCacheMissCount =
- new MetricsLongValue("blockCacheMissCount", registry);
-
- /**
- * Block cache evict count.
- */
- public final MetricsLongValue blockCacheEvictedCount =
- new MetricsLongValue("blockCacheEvictedCount", registry);
-
- /**
- * Block hit ratio.
- */
- public final MetricsIntValue blockCacheHitRatio =
- new MetricsIntValue("blockCacheHitRatio", registry);
-
- /**
- * Block hit caching ratio. This only includes the requests to the block
- * cache where caching was turned on. See HBASE-2253.
- */
- public final MetricsIntValue blockCacheHitCachingRatio =
- new MetricsIntValue("blockCacheHitCachingRatio", registry);
-
- /** Block hit ratio for past N periods. */
- public final MetricsIntValue blockCacheHitRatioPastNPeriods = new MetricsIntValue("blockCacheHitRatioPastNPeriods", registry);
-
- /** Block hit caching ratio for past N periods */
- public final MetricsIntValue blockCacheHitCachingRatioPastNPeriods = new MetricsIntValue("blockCacheHitCachingRatioPastNPeriods", registry);
-
- /*
- * Count of requests to the regionservers since last call to metrics update
- */
- public final MetricsRate requests = new MetricsRate("requests", registry);
-
- /**
- * Count of stores open on the regionserver.
- */
- public final MetricsIntValue stores = new MetricsIntValue("stores", registry);
-
- /**
- * Count of storefiles open on the regionserver.
- */
- public final MetricsIntValue storefiles =
- new MetricsIntValue("storefiles", registry);
-
- /**
- * Count of read requests
- */
- public final MetricsLongValue readRequestsCount =
- new MetricsLongValue("readRequestsCount", registry);
-
- /**
- * Count of write requests
- */
- public final MetricsLongValue writeRequestsCount =
- new MetricsLongValue("writeRequestsCount", registry);
-
- /**
- * Count of checkAndMutates the failed the check
- */
- public final MetricsLongValue checkAndMutateChecksFailed =
- new MetricsLongValue("checkAndMutateChecksFailed", registry);
-
- /**
- * Count of checkAndMutates that passed the check
- */
- public final MetricsLongValue checkAndMutateChecksPassed =
- new MetricsLongValue("checkAndMutateChecksPassed", registry);
- /**
- */
- public final MetricsIntValue storefileIndexSizeMB =
- new MetricsIntValue("storefileIndexSizeMB", registry);
-
- /** The total size of block index root levels in this regionserver in KB. */
- public final MetricsIntValue rootIndexSizeKB =
- new MetricsIntValue("rootIndexSizeKB", registry);
-
- /** Total size of all block indexes (not necessarily loaded in memory) */
- public final MetricsIntValue totalStaticIndexSizeKB =
- new MetricsIntValue("totalStaticIndexSizeKB", registry);
-
- /** Total size of all Bloom filters (not necessarily loaded in memory) */
- public final MetricsIntValue totalStaticBloomSizeKB =
- new MetricsIntValue("totalStaticBloomSizeKB", registry);
-
- /**
- * HDFS blocks locality index
- */
- public final MetricsIntValue hdfsBlocksLocalityIndex =
- new MetricsIntValue("hdfsBlocksLocalityIndex", registry);
-
- /**
- * Sum of all the memstore sizes in this regionserver in MB
- */
- public final MetricsIntValue memstoreSizeMB =
- new MetricsIntValue("memstoreSizeMB", registry);
-
- /**
- * Number of put with WAL disabled in this regionserver in MB
- */
- public final MetricsLongValue numPutsWithoutWAL =
- new MetricsLongValue("numPutsWithoutWAL", registry);
-
- /**
- * Possible data loss sizes (due to put with WAL disabled) in this regionserver in MB
- */
- public final MetricsIntValue mbInMemoryWithoutWAL =
- new MetricsIntValue("mbInMemoryWithoutWAL", registry);
-
- /**
- * Size of the compaction queue.
- */
- public final MetricsIntValue compactionQueueSize =
- new MetricsIntValue("compactionQueueSize", registry);
-
- /**
- * Size of the flush queue.
- */
- public final MetricsIntValue flushQueueSize =
- new MetricsIntValue("flushQueueSize", registry);
-
- /**
- * filesystem sequential read latency distribution
- */
- public final MetricsHistogram fsReadLatencyHistogram =
- new MetricsHistogram("fsReadLatencyHistogram", registry);
-
- /**
- * filesystem pread latency distribution
- */
- public final MetricsHistogram fsPreadLatencyHistogram =
- new MetricsHistogram("fsPreadLatencyHistogram", registry);
-
- /**
- * Metrics on the distribution of filesystem write latencies (improved version of fsWriteLatency)
- */
- public final MetricsHistogram fsWriteLatencyHistogram =
- new MetricsHistogram("fsWriteLatencyHistogram", registry);
-
-
- /**
- * filesystem read latency
- */
- public final MetricsTimeVaryingRate fsReadLatency =
- new MetricsTimeVaryingRate("fsReadLatency", registry);
-
- /**
- * filesystem positional read latency
- */
- public final MetricsTimeVaryingRate fsPreadLatency =
- new MetricsTimeVaryingRate("fsPreadLatency", registry);
-
- /**
- * filesystem write latency
- */
- public final MetricsTimeVaryingRate fsWriteLatency =
- new MetricsTimeVaryingRate("fsWriteLatency", registry);
-
- /**
- * size (in bytes) of data in HLog append calls
- */
- public final MetricsTimeVaryingRate fsWriteSize =
- new MetricsTimeVaryingRate("fsWriteSize", registry);
-
- /**
- * filesystem sync latency
- */
- public final MetricsTimeVaryingRate fsSyncLatency =
- new MetricsTimeVaryingRate("fsSyncLatency", registry);
-
-
- /**
- * time each scheduled compaction takes
- */
- protected final MetricsHistogram compactionTime =
- new MetricsHistogram("compactionTime", registry);
-
- protected final MetricsHistogram compactionSize =
- new MetricsHistogram("compactionSize", registry);
-
- /**
- * time each scheduled flush takes
- */
- protected final MetricsHistogram flushTime =
- new MetricsHistogram("flushTime", registry);
-
- protected final MetricsHistogram flushSize =
- new MetricsHistogram("flushSize", registry);
-
- public final MetricsLongValue slowHLogAppendCount =
- new MetricsLongValue("slowHLogAppendCount", registry);
-
- public final MetricsTimeVaryingRate slowHLogAppendTime =
- new MetricsTimeVaryingRate("slowHLogAppendTime", registry);
-
- public final PersistentMetricsTimeVaryingRate regionSplitSuccessCount =
- new PersistentMetricsTimeVaryingRate("regionSplitSuccessCount", registry);
-
- public final MetricsLongValue regionSplitFailureCount =
- new MetricsLongValue("regionSplitFailureCount", registry);
-
- /**
- * Number of times checksum verification failed.
- */
- public final MetricsLongValue checksumFailuresCount =
- new MetricsLongValue("checksumFailuresCount", registry);
-
- /**
- * time blocked on lack of resources
- */
- public final MetricsHistogram updatesBlockedSeconds = new MetricsHistogram(
- "updatesBlockedSeconds", registry);
-
- /**
- * time blocked on memstoreHW
- */
- public final MetricsHistogram updatesBlockedSecondsHighWater = new MetricsHistogram(
- "updatesBlockedSecondsHighWater",registry);
-
- public RegionServerMetrics() {
- MetricsContext context = MetricsUtil.getContext("hbase");
- metricsRecord = MetricsUtil.createRecord(context, "regionserver");
- String name = Thread.currentThread().getName();
- metricsRecord.setTag("RegionServer", name);
- context.registerUpdater(this);
- // Add jvmmetrics.
- JvmMetrics.init("RegionServer", name);
- // Add Hbase Info metrics
- HBaseInfo.init();
-
- // export for JMX
- statistics = new RegionServerStatistics(this.registry, name);
-
- // get custom attributes
- try {
- Object m = ContextFactory.getFactory().getAttribute("hbase.extendedperiod");
- if (m instanceof String) {
- this.extendedPeriod = Long.parseLong((String) m)*1000;
- }
- } catch (IOException ioe) {
- LOG.info("Couldn't load ContextFactory for Metrics config info");
- }
-
- LOG.info("Initialized");
- }
-
- public void shutdown() {
- if (statistics != null)
- statistics.shutdown();
- }
-
- /**
- * Since this object is a registered updater, this method will be called
- * periodically, e.g. every 5 seconds.
- * @param caller the metrics context that this responsible for calling us
- */
- public void doUpdates(MetricsContext caller) {
- synchronized (this) {
- this.lastUpdate = System.currentTimeMillis();
-
- // has the extended period for long-living stats elapsed?
- if (this.extendedPeriod > 0 &&
- this.lastUpdate - this.lastExtUpdate >= this.extendedPeriod) {
- this.lastExtUpdate = this.lastUpdate;
- this.compactionTime.clear();
- this.compactionSize.clear();
- this.flushTime.clear();
- this.flushSize.clear();
- this.resetAllMinMax();
- }
-
- this.stores.pushMetric(this.metricsRecord);
- this.storefiles.pushMetric(this.metricsRecord);
- this.storefileIndexSizeMB.pushMetric(this.metricsRecord);
- this.rootIndexSizeKB.pushMetric(this.metricsRecord);
- this.totalStaticIndexSizeKB.pushMetric(this.metricsRecord);
- this.totalStaticBloomSizeKB.pushMetric(this.metricsRecord);
- this.memstoreSizeMB.pushMetric(this.metricsRecord);
- this.mbInMemoryWithoutWAL.pushMetric(this.metricsRecord);
- this.numPutsWithoutWAL.pushMetric(this.metricsRecord);
- this.readRequestsCount.pushMetric(this.metricsRecord);
- this.writeRequestsCount.pushMetric(this.metricsRecord);
- this.regions.pushMetric(this.metricsRecord);
- this.requests.pushMetric(this.metricsRecord);
- this.compactionQueueSize.pushMetric(this.metricsRecord);
- this.flushQueueSize.pushMetric(this.metricsRecord);
- this.blockCacheSize.pushMetric(this.metricsRecord);
- this.blockCacheFree.pushMetric(this.metricsRecord);
- this.blockCacheCount.pushMetric(this.metricsRecord);
- this.blockCacheHitCount.pushMetric(this.metricsRecord);
- this.blockCacheMissCount.pushMetric(this.metricsRecord);
- this.blockCacheEvictedCount.pushMetric(this.metricsRecord);
- this.blockCacheHitRatio.pushMetric(this.metricsRecord);
- this.blockCacheHitCachingRatio.pushMetric(this.metricsRecord);
- this.hdfsBlocksLocalityIndex.pushMetric(this.metricsRecord);
- this.blockCacheHitRatioPastNPeriods.pushMetric(this.metricsRecord);
- this.blockCacheHitCachingRatioPastNPeriods.pushMetric(this.metricsRecord);
-
- // Mix in HFile and HLog metrics
- // Be careful. Here is code for MTVR from up in hadoop:
- // public synchronized void inc(final int numOps, final long time) {
- // currentData.numOperations += numOps;
- // currentData.time += time;
- // long timePerOps = time/numOps;
- // minMax.update(timePerOps);
- // }
- // Means you can't pass a numOps of zero or get a ArithmeticException / by zero.
- // HLog metrics
- addHLogMetric(HLogMetrics.getWriteTime(), this.fsWriteLatency);
- addHLogMetric(HLogMetrics.getWriteSize(), this.fsWriteSize);
- addHLogMetric(HLogMetrics.getSyncTime(), this.fsSyncLatency);
- addHLogMetric(HLogMetrics.getSlowAppendTime(), this.slowHLogAppendTime);
- this.slowHLogAppendCount.set(HLogMetrics.getSlowAppendCount());
- // HFile metrics, sequential reads
- int ops = HFile.getReadOps();
- if (ops != 0) this.fsReadLatency.inc(ops, HFile.getReadTimeMs());
- // HFile metrics, positional reads
- ops = HFile.getPreadOps();
- if (ops != 0) this.fsPreadLatency.inc(ops, HFile.getPreadTimeMs());
- this.checksumFailuresCount.set(HFile.getChecksumFailuresCount());
-
- /* NOTE: removed HFile write latency. 2 reasons:
- * 1) Mixing HLog latencies are far higher priority since they're
- * on-demand and HFile is used in background (compact/flush)
- * 2) HFile metrics are being handled at a higher level
- * by compaction & flush metrics.
- */
-
- for(Long latency : HFile.getReadLatenciesNanos()) {
- this.fsReadLatencyHistogram.update(latency);
- }
- for(Long latency : HFile.getPreadLatenciesNanos()) {
- this.fsPreadLatencyHistogram.update(latency);
- }
- for(Long latency : HFile.getWriteLatenciesNanos()) {
- this.fsWriteLatencyHistogram.update(latency);
- }
-
-
- // push the result
- this.fsPreadLatency.pushMetric(this.metricsRecord);
- this.fsReadLatency.pushMetric(this.metricsRecord);
- this.fsWriteLatency.pushMetric(this.metricsRecord);
- this.fsWriteSize.pushMetric(this.metricsRecord);
-
- this.fsReadLatencyHistogram.pushMetric(this.metricsRecord);
- this.fsWriteLatencyHistogram.pushMetric(this.metricsRecord);
- this.fsPreadLatencyHistogram.pushMetric(this.metricsRecord);
-
- this.fsSyncLatency.pushMetric(this.metricsRecord);
- this.compactionTime.pushMetric(this.metricsRecord);
- this.compactionSize.pushMetric(this.metricsRecord);
- this.flushTime.pushMetric(this.metricsRecord);
- this.flushSize.pushMetric(this.metricsRecord);
- this.slowHLogAppendCount.pushMetric(this.metricsRecord);
- this.regionSplitSuccessCount.pushMetric(this.metricsRecord);
- this.regionSplitFailureCount.pushMetric(this.metricsRecord);
- this.checksumFailuresCount.pushMetric(this.metricsRecord);
- this.updatesBlockedSeconds.pushMetric(this.metricsRecord);
- this.updatesBlockedSecondsHighWater.pushMetric(this.metricsRecord);
- }
- this.metricsRecord.update();
- }
-
- private void addHLogMetric(HLogMetrics.Metric logMetric,
- MetricsTimeVaryingRate hadoopMetric) {
- if (logMetric.count > 0)
- hadoopMetric.inc(logMetric.min);
- if (logMetric.count > 1)
- hadoopMetric.inc(logMetric.max);
- if (logMetric.count > 2) {
- int ops = logMetric.count - 2;
- hadoopMetric.inc(ops, logMetric.total - logMetric.max - logMetric.min);
- }
- }
-
- public void resetAllMinMax() {
- this.atomicIncrementTime.resetMinMax();
- this.fsReadLatency.resetMinMax();
- this.fsWriteLatency.resetMinMax();
- this.fsWriteSize.resetMinMax();
- this.fsSyncLatency.resetMinMax();
- this.slowHLogAppendTime.resetMinMax();
- }
-
- /**
- * @return Count of requests.
- */
- public float getRequests() {
- return this.requests.getPreviousIntervalValue();
- }
-
- /**
- * @param time time that compaction took
- * @param size bytesize of storefiles in the compaction
- */
- public synchronized void addCompaction(long time, long size) {
- this.compactionTime.update(time);
- this.compactionSize.update(size);
- }
-
- /**
- * @param flushes history in