diff --git a/dev-support/findbugs-exclude.xml b/dev-support/findbugs-exclude.xml index 5ab62e43b47..33c3c3c4f6a 100644 --- a/dev-support/findbugs-exclude.xml +++ b/dev-support/findbugs-exclude.xml @@ -50,7 +50,7 @@ - + diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java index 97e887fe08d..2b2c53d7000 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java @@ -44,6 +44,7 @@ public class CompatibilitySingletonFactory extends CompatibilityFactory { * * @return the singleton */ + @SuppressWarnings("unchecked") public static synchronized <T> T getInstance(Class<T> klass) { T instance = (T) instances.get(klass); if (instance == null) {
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java similarity index 91% rename from hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSource.java rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java index 8fcfaf01b0b..1350b0182fa 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java @@ -16,29 +16,29 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.master.metrics; +package org.apache.hadoop.hbase.master; -import org.apache.hadoop.hbase.metrics.BaseMetricsSource; +import org.apache.hadoop.hbase.metrics.BaseSource; /** * Interface that classes that expose metrics about the master will implement. */ -public interface MasterMetricsSource extends BaseMetricsSource { +public interface MetricsMasterSource extends BaseSource { /** * The name of the metrics */ - static final String METRICS_NAME = "HMaster"; + static final String METRICS_NAME = "Server"; /** * The context metrics will be under. */ - static final String METRICS_CONTEXT = "hmaster"; + static final String METRICS_CONTEXT = "master"; /** * The name of the metrics context that metrics will be under in jmx */ - static final String METRICS_JMX_CONTEXT = "HMaster"; + static final String METRICS_JMX_CONTEXT = "Master,sub=" + METRICS_NAME; /** * Description @@ -76,24 +76,28 @@ public interface MasterMetricsSource extends BaseMetricsSource { /** * Increment the number of requests the cluster has seen. + * * @param inc Amount to increment the total by. */ void incRequests(final int inc); /** * Set the number of regions in transition. + * * @param ritCount count of the regions in transition. */ void setRIT(int ritCount); /** * Set the count of the number of regions that have been in transition over the threshold time. + * * @param ritCountOverThreshold number of regions in transition for longer than threshold. */ void setRITCountOverThreshold(int ritCountOverThreshold); /** * Set the oldest region in transition. + * * @param age age of the oldest RIT.
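 + * The age is exported as a gauge (ritOldestAgeGauge in the hadoop1 implementation below).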
*/ void setRITOldestAge(long age); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java similarity index 76% rename from hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactory.java rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java index 157b2deaadf..63a85a33528 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java @@ -16,13 +16,13 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.master.metrics; +package org.apache.hadoop.hbase.master; /** - * Interface of a factory to create MasterMetricsSource when given a MasterMetricsWrapper + * Interface of a factory to create MetricsMasterSource when given a MetricsMasterWrapper */ -public interface MasterMetricsSourceFactory { +public interface MetricsMasterSourceFactory { - MasterMetricsSource create(MasterMetricsWrapper beanWrapper); + MetricsMasterSource create(MetricsMasterWrapper masterWrapper); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java similarity index 90% rename from hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsWrapper.java rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java index ff416eb637e..838676356f4 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java @@ -16,13 +16,13 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.master.metrics; +package org.apache.hadoop.hbase.master; /** * This is the interface that will expose information to hadoop1/hadoop2 implementations of the - * MasterMetricsSource. + * MetricsMasterSource. 
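 + * The hadoop1/hadoop2 source implementations read these getters each time the metrics system polls them (see getMetrics in MetricsMasterSourceImpl below).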
*/ -public interface MasterMetricsWrapper { +public interface MetricsMasterWrapper { /** * Get ServerName @@ -31,54 +31,63 @@ public interface MasterMetricsWrapper { /** * Get Average Load + * * @return Average Load */ double getAverageLoad(); /** * Get the Cluster ID + * * @return Cluster ID */ String getClusterId(); /** * Get the Zookeeper Quorum Info + * * @return Zookeeper Quorum Info */ String getZookeeperQuorum(); /** * Get the co-processors + * * @return Co-processors */ String[] getCoprocessors(); /** * Get hbase master start time + * * @return Start time of master in milliseconds */ - long getMasterStartTime(); + long getStartTime(); /** * Get the hbase master active time + * * @return Time in milliseconds when master became active */ - long getMasterActiveTime(); + long getActiveTime(); /** * Whether this master is the active master + * * @return True if this is the active master */ boolean getIsActiveMaster(); /** * Get the live region servers + * * @return Live region servers */ int getRegionServers(); /** * Get the dead region servers + * * @return Dead region Servers */ int getDeadRegionServers(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java similarity index 87% rename from hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSource.java rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java index e8cefefc516..20139c4c9d4 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java @@ -19,9 +19,11 @@ package org.apache.hadoop.hbase.metrics; /** - * BaseMetricsSource for dynamic metrics to announce to Metrics2 + * BaseSource for dynamic metrics to announce to Metrics2 */ -public interface BaseMetricsSource { +public interface BaseSource { + + public static final String HBASE_METRICS_SYSTEM_NAME = "HBase"; /** * Clear out the metrics and re-prepare the source. @@ -53,11 +55,11 @@ public interface BaseMetricsSource { void decGauge(String gaugeName, long delta); /** - * Remove a gauge and no longer announce it. + * Remove a metric and no longer announce it. * * @param key Name of the gauge to remove. */ - void removeGauge(String key); + void removeMetric(String key); /** * Add some amount to a counter. @@ -84,12 +86,4 @@ public interface BaseMetricsSource { */ void updateQuantile(String name, long value); - - /** - * Remove a counter and stop announcing it to metrics2. - * - * @param key - */ - void removeCounter(String key); - } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java new file mode 100644 index 00000000000..5e6e27323f9 --- /dev/null +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.metrics.BaseSource; + +/** + * This interface will be implemented by a MetricsSource that will export metrics from + * multiple regions into the hadoop metrics system. + */ +public interface MetricsRegionAggregateSource extends BaseSource { + + /** + * The name of the metrics + */ + static final String METRICS_NAME = "Regions"; + + /** + * The name of the metrics context that metrics will be under. + */ + static final String METRICS_CONTEXT = "regionserver"; + + /** + * Description + */ + static final String METRICS_DESCRIPTION = "Metrics about HBase RegionServer regions and tables"; + + /** + * The name of the metrics context that metrics will be under in jmx + */ + static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; + + /** + * Register a MetricsRegionSource as being open. + * + * @param source the source for the region being opened. + */ + void register(MetricsRegionSource source); + + /** + * Remove a region's source. This is called when a region is closed. + * + * @param source The region to remove. + */ + void deregister(MetricsRegionSource source); +} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java new file mode 100644 index 00000000000..0ed4fee41ab --- /dev/null +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java @@ -0,0 +1,166 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.metrics.BaseSource; + +/** + * Interface for classes that expose metrics about the regionserver. + */ +public interface MetricsRegionServerSource extends BaseSource { + + /** + * The name of the metrics + */ + static final String METRICS_NAME = "Server"; + + /** + * The name of the metrics context that metrics will be under. 
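 + * The same context is shared by the other regionserver-hosted sources; MetricsRegionAggregateSource and MetricsReplicationSource declare the same value.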
+ */ + static final String METRICS_CONTEXT = "regionserver"; + + /** + * Description + */ + static final String METRICS_DESCRIPTION = "Metrics about HBase RegionServer"; + + /** + * The name of the metrics context that metrics will be under in jmx + */ + static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; + + /** + * Update the Put time histogram + * + * @param t time it took + */ + void updatePut(long t); + + /** + * Update the Delete time histogram + * + * @param t time it took + */ + void updateDelete(long t); + + /** + * Update the Get time histogram. + * + * @param t time it took + */ + void updateGet(long t); + + /** + * Update the Increment time histogram. + * + * @param t time it took + */ + void updateIncrement(long t); + + /** + * Update the Append time histogram. + * + * @param t time it took + */ + void updateAppend(long t); + + // Strings used for exporting to metrics system. + static final String REGION_COUNT = "regionCount"; + static final String REGION_COUNT_DESC = "Number of regions"; + static final String STORE_COUNT = "storeCount"; + static final String STORE_COUNT_DESC = "Number of Stores"; + static final String STOREFILE_COUNT = "storeFileCount"; + static final String STOREFILE_COUNT_DESC = "Number of Store Files"; + static final String MEMSTORE_SIZE = "memStoreSize"; + static final String MEMSTORE_SIZE_DESC = "Size of the memstore"; + static final String STOREFILE_SIZE = "storeFileSize"; + static final String STOREFILE_SIZE_DESC = "Size of storefiles being served."; + static final String TOTAL_REQUEST_COUNT = "totalRequestCount"; + static final String TOTAL_REQUEST_COUNT_DESC = + "Total number of requests this RegionServer has answered."; + static final String READ_REQUEST_COUNT = "readRequestCount"; + static final String READ_REQUEST_COUNT_DESC = + "Number of read requests this region server has answered."; + static final String WRITE_REQUEST_COUNT = "writeRequestCount"; + static final String WRITE_REQUEST_COUNT_DESC = + "Number of mutation requests this region server has answered."; + static final String CHECK_MUTATE_FAILED_COUNT = "checkMutateFailedCount"; + static final String CHECK_MUTATE_FAILED_COUNT_DESC = + "Number of Check and Mutate calls that failed the checks."; + static final String CHECK_MUTATE_PASSED_COUNT = "checkMutatePassedCount"; + static final String CHECK_MUTATE_PASSED_COUNT_DESC = + "Number of Check and Mutate calls that passed the checks."; + static final String STOREFILE_INDEX_SIZE = "storeFileIndexSize"; + static final String STOREFILE_INDEX_SIZE_DESC = "Size of indexes in storefiles on disk."; + static final String STATIC_INDEX_SIZE = "staticIndexSize"; + static final String STATIC_INDEX_SIZE_DESC = "Uncompressed size of the static indexes."; + static final String STATIC_BLOOM_SIZE = "staticBloomSize"; + static final String STATIC_BLOOM_SIZE_DESC = + "Uncompressed size of the static bloom filters."; + static final String NUMBER_OF_PUTS_WITHOUT_WAL = "putsWithoutWALCount"; + static final String NUMBER_OF_PUTS_WITHOUT_WAL_DESC = + "Number of mutations that have been sent by clients with the write ahead logging turned off."; + static final String DATA_SIZE_WITHOUT_WAL = "putsWithoutWALSize"; + static final String DATA_SIZE_WITHOUT_WAL_DESC = + "Size of data that has been sent by clients with the write ahead logging turned off."; + static final String PERCENT_FILES_LOCAL = "percentFilesLocal"; + static final String PERCENT_FILES_LOCAL_DESC = + "The percent of HFiles that are stored on the local hdfs data node."; +
static final String COMPACTION_QUEUE_LENGTH = "compactionQueueLength"; + static final String COMPACTION_QUEUE_LENGTH_DESC = "Length of the queue for compactions."; + static final String FLUSH_QUEUE_LENGTH = "flushQueueLength"; + static final String FLUSH_QUEUE_LENGTH_DESC = "Length of the queue for region flushes"; + static final String BLOCK_CACHE_FREE_SIZE = "blockCacheFreeSize"; + static final String BLOCK_CACHE_FREE_DESC = + "Size of the block cache that is not occupied."; + static final String BLOCK_CACHE_COUNT = "blockCacheCount"; + static final String BLOCK_CACHE_COUNT_DESC = "Number of blocks in the block cache."; + static final String BLOCK_CACHE_SIZE = "blockCacheSize"; + static final String BLOCK_CACHE_SIZE_DESC = "Size of the block cache."; + static final String BLOCK_CACHE_HIT_COUNT = "blockCacheHitCount"; + static final String BLOCK_CACHE_HIT_COUNT_DESC = "Count of the hits on the block cache."; + static final String BLOCK_CACHE_MISS_COUNT = "blockCacheMissCount"; + static final String BLOCK_COUNT_MISS_COUNT_DESC = + "Number of requests for a block that missed the block cache."; + static final String BLOCK_CACHE_EVICTION_COUNT = "blockCacheEvictionCount"; + static final String BLOCK_CACHE_EVICTION_COUNT_DESC = + "Count of the number of blocks evicted from the block cache."; + static final String BLOCK_CACHE_HIT_PERCENT = "blockCountHitPercent"; + static final String BLOCK_CACHE_HIT_PERCENT_DESC = + "Percent of block cache requests that are hits"; + static final String BLOCK_CACHE_EXPRESS_HIT_PERCENT = "blockCacheExpressHitPercent"; + static final String BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC = + "The percent of the time that requests with the cache turned on hit the cache."; + static final String RS_START_TIME_NAME = "regionServerStartTime"; + static final String ZOOKEEPER_QUORUM_NAME = "zookeeperQuorum"; + static final String SERVER_NAME_NAME = "serverName"; + static final String CLUSTER_ID_NAME = "clusterId"; + static final String RS_START_TIME_DESC = "RegionServer Start Time"; + static final String ZOOKEEPER_QUORUM_DESC = "Zookeeper Quorum"; + static final String SERVER_NAME_DESC = "Server Name"; + static final String CLUSTER_ID_DESC = "Cluster Id"; + static final String UPDATES_BLOCKED_TIME = "updatesBlockedTime"; + static final String UPDATES_BLOCKED_DESC = + "Number of milliseconds that updates have been blocked so that the memstore can be flushed."; + static final String DELETE_KEY = "delete"; + static final String GET_KEY = "get"; + static final String INCREMENT_KEY = "increment"; + static final String PUT_KEY = "multiput"; + static final String APPEND_KEY = "append"; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MXBean.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java similarity index 62% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MXBean.java rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java index b0a92c573c0..39203cb7dda 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MXBean.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java @@ -18,29 +18,24 @@ package org.apache.hadoop.hbase.regionserver; -import org.apache.hadoop.classification.InterfaceStability.Evolving; - /** - * This is the JMX management interface for HBase Region Server information + * Interface of a factory to create Metrics
Sources used inside of regionservers. */ -@Evolving -public interface MXBean { +public interface MetricsRegionServerSourceFactory { /** - * Return RegionServer's ServerName - * @return ServerName + * Given a wrapper create a MetricsRegionServerSource. + * + * @param regionServerWrapper The wrapped region server + * @return a Metrics Source. */ - public String getServerName(); + MetricsRegionServerSource createServer(MetricsRegionServerWrapper regionServerWrapper); /** - * Get loaded co-processors - * @return Loaded Co-processors + * Create a MetricsRegionSource from a MetricsRegionWrapper. + * + * @param wrapper The region wrapper to build a source for. + * @return a MetricsRegionSource for that region. */ - public String[] getCoprocessors(); - - /** - * Get Zookeeper Quorum - * @return Comma-separated list of Zookeeper Quorum servers - */ - public String getZookeeperQuorum(); + MetricsRegionSource createRegion(MetricsRegionWrapper wrapper); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java new file mode 100644 index 00000000000..454e286c097 --- /dev/null +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java @@ -0,0 +1,205 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +/** + * This is the interface that will expose RegionServer information to hadoop1/hadoop2 + * implementations of the MetricsRegionServerSource. + */ +public interface MetricsRegionServerWrapper { + + /** + * Get ServerName + */ + public String getServerName(); + + /** + * Get the Cluster ID + * + * @return Cluster ID + */ + public String getClusterId(); + + /** + * Get the Zookeeper Quorum Info + * + * @return Zookeeper Quorum Info + */ + public String getZookeeperQuorum(); + + /** + * Get the co-processors + * + * @return Co-processors + */ + public String getCoprocessors(); + + /** + * Get HRegionServer start time + * + * @return Start time of RegionServer in milliseconds + */ + public long getStartCode(); + + /** + * The number of online regions + */ + long getNumOnlineRegions(); + + /** + * Get the number of stores hosted on this region server. + */ + long getNumStores(); + + /** + * Get the number of store files hosted on this region server. + */ + long getNumStoreFiles(); + + /** + * Get the size of the memstore on this region server. + */ + long getMemstoreSize(); + + /** + * Get the total size of the store files this region server is serving from. + */ + long getStoreFileSize(); + + /** + * Get the number of requests per second. + */ + double getRequestsPerSecond(); + + /** + * Get the total number of requests this region server has answered.
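 + * Unlike getRequestsPerSecond() this count only ever increases.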
+ */ + long getTotalRequestCount(); + + /** + * Get the number of read requests to regions hosted on this region server. + */ + long getReadRequestsCount(); + + /** + * Get the number of write requests to regions hosted on this region server. + */ + long getWriteRequestsCount(); + + /** + * Get the number of CAS operations that failed. + */ + long getCheckAndMutateChecksFailed(); + + /** + * Get the number of CAS operations that passed. + */ + long getCheckAndMutateChecksPassed(); + + /** + * Get the Size of indexes in storefiles on disk. + */ + long getStoreFileIndexSize(); + + /** + * Get the size of the static indexes including the roots. + */ + long getTotalStaticIndexSize(); + + /** + * Get the size of the static bloom filters. + */ + long getTotalStaticBloomSize(); + + /** + * Number of mutations received with WAL explicitly turned off. + */ + long getNumPutsWithoutWAL(); + + /** + * Amount of data in the memstore but not in the WAL because mutations explicitly had their + * WAL turned off. + */ + long getDataInMemoryWithoutWAL(); + + /** + * Get the percent of HFiles that are local. + */ + int getPercentFileLocal(); + + /** + * Get the size of the compaction queue. + */ + int getCompactionQueueSize(); + + /** + * Get the size of the flush queue. + */ + int getFlushQueueSize(); + + /** + * Get the size of the block cache that is free. + */ + long getBlockCacheFreeSize(); + + /** + * Get the number of items in the block cache. + */ + long getBlockCacheCount(); + + /** + * Get the total size of the block cache. + */ + long getBlockCacheSize(); + + /** + * Get the count of hits to the block cache. + */ + long getBlockCacheHitCount(); + + /** + * Get the count of misses to the block cache. + */ + long getBlockCacheMissCount(); + + /** + * Get the number of items evicted from the block cache. + */ + long getBlockCacheEvictedCount(); + + /** + * Get the percent of all requests that hit the block cache. + */ + int getBlockCacheHitPercent(); + + /** + * Get the percent of requests with the block cache turned on that hit the block cache. + */ + int getBlockCacheHitCachingPercent(); + + /** + * Force a re-computation of the metrics. + */ + void forceRecompute(); + + /** + * Get the amount of time that updates were blocked. + */ + long getUpdatesBlockedTime(); +} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java new file mode 100644 index 00000000000..0bc14c328fb --- /dev/null +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hbase.regionserver; + + +/** + * This interface will be implemented to allow single regions to push metrics into + * MetricsRegionAggregateSource that will in turn push data to the Hadoop metrics system. + */ +public interface MetricsRegionSource extends Comparable<MetricsRegionSource> { + + /** + * Close the region's metrics as this region is closing. + */ + void close(); + + /** + * Update related counts of puts. + */ + void updatePut(); + + /** + * Update related counts of deletes. + */ + void updateDelete(); + + /** + * Update related counts of gets. + */ + void updateGet(); + + /** + * Update related counts of increments. + */ + void updateIncrement(); + + /** + * Update related counts of appends. + */ + void updateAppend(); + + /** + * Get the aggregate source to which this reports. + */ + MetricsRegionAggregateSource getAggregateSource(); +} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java new file mode 100644 index 00000000000..2c533ea871d --- /dev/null +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +/** + * Interface of class that will wrap an HRegion and export numbers so they can be + * used in MetricsRegionSource. + */ +public interface MetricsRegionWrapper { + + /** + * Get the name of the table the region belongs to. + * + * @return The string version of the table name. + */ + String getTableName(); + + /** + * Get the name of the region. + * + * @return The encoded name of the region. + */ + String getRegionName(); + + /** + * Get the number of stores in this region. + */ + long getNumStores(); + + /** + * Get the number of store files in this region. + */ + long getNumStoreFiles(); + + /** + * Get the size of this region's memstore. + */ + long getMemstoreSize(); + + /** + * Get the total size of the store files this region is serving from. + */ + long getStoreFileSize(); + + /** + * Get the total number of read requests that have been issued against this region. + */ + long getReadRequestCount(); + + /** + * Get the total number of mutations that have been issued against this region.
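 + * Mutations here cover puts, deletes, appends and increments.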
+ */ + long getWriteRequestCount(); + +} diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java similarity index 76% rename from hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSource.java rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java index 0090f495341..5b79a3977a4 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java @@ -16,29 +16,29 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.replication.regionserver.metrics; +package org.apache.hadoop.hbase.replication.regionserver; -import org.apache.hadoop.hbase.metrics.BaseMetricsSource; +import org.apache.hadoop.hbase.metrics.BaseSource; /** * Provides access to gauges and counters. Implementers will hide the details of hadoop1 or * hadoop2's metrics2 classes and publishing. */ -public interface ReplicationMetricsSource extends BaseMetricsSource { +public interface MetricsReplicationSource extends BaseSource { /** * The name of the metrics */ - static final String METRICS_NAME = "ReplicationMetrics"; + static final String METRICS_NAME = "Replication"; /** * The name of the metrics context that metrics will be under. */ - static final String METRICS_CONTEXT = "replicationmetrics"; + static final String METRICS_CONTEXT = "regionserver"; /** * The name of the metrics context that metrics will be under. */ - static final String METRICS_JMX_CONTEXT = "ReplicationMetrics"; + static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; /** * A description. diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java similarity index 90% rename from hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSource.java rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java index 9d7f6914dc4..aa43f35fedb 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java @@ -16,20 +16,20 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.rest.metrics; +package org.apache.hadoop.hbase.rest; -import org.apache.hadoop.hbase.metrics.BaseMetricsSource; +import org.apache.hadoop.hbase.metrics.BaseSource; /** * Interface of the Metrics Source that will export data to Hadoop's Metrics2 system. 
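 + * Implementations are obtained through CompatibilitySingletonFactory, like the other sources in this module.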
*/ -public interface RESTMetricsSource extends BaseMetricsSource { +public interface MetricsRESTSource extends BaseSource { - public static String METRICS_NAME = "Rest"; + public static String METRICS_NAME = "REST"; public static String CONTEXT = "rest"; - public static String JMX_CONTEXT = "Rest"; + public static String JMX_CONTEXT = "REST"; public static String METRICS_DESCRIPTION = "Metrics about the HBase REST server"; @@ -49,42 +49,49 @@ public interface RESTMetricsSource extends BaseMetricsSource { /** * Increment the number of requests + * * @param inc Amount to increment by */ void incrementRequests(int inc); /** * Increment the number of successful Get requests. + * * @param inc Number of successful get requests. */ void incrementSucessfulGetRequests(int inc); /** * Increment the number of successful Put requests. + * * @param inc Number of successful put requests. */ void incrementSucessfulPutRequests(int inc); /** * Increment the number of successful Delete requests. + * * @param inc Number of successful delete requests. */ void incrementSucessfulDeleteRequests(int inc); /** * Increment the number of failed Put Requests. + * * @param inc Number of failed Put requests. */ void incrementFailedPutRequests(int inc); /** * Increment the number of failed Get requests. + * * @param inc The number of failed Get Requests. */ void incrementFailedGetRequests(int inc); /** * Increment the number of failed Delete requests. + * * @param inc The number of failed delete requests. */ void incrementFailedDeleteRequests(int inc); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java similarity index 92% rename from hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSource.java rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java index f6ba023b07c..206154fdb46 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java @@ -16,14 +16,14 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.thrift.metrics; +package org.apache.hadoop.hbase.thrift; -import org.apache.hadoop.hbase.metrics.BaseMetricsSource; +import org.apache.hadoop.hbase.metrics.BaseSource; /** * Interface of a class that will export metrics about Thrift to hadoop's metrics2.
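 + * A single source interface serves both Thrift server versions; the factory below creates one instance per server type.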
*/ -public interface ThriftServerMetricsSource extends BaseMetricsSource { +public interface MetricsThriftServerSource extends BaseSource { static final String BATCH_GET_KEY = "batchGet"; static final String BATCH_MUTATE_KEY = "batchMutate"; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java similarity index 81% rename from hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactory.java rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java index be6b5f9e7ab..8fca2cf3ce8 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java @@ -16,10 +16,10 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.thrift.metrics; +package org.apache.hadoop.hbase.thrift; /** Factory that will be used to create metrics sources for the two different types of thrift servers. */ -public interface ThriftServerMetricsSourceFactory { +public interface MetricsThriftServerSourceFactory { static final String METRICS_NAME = "Thrift"; static final String METRICS_DESCRIPTION = "Thrift Server Metrics"; @@ -28,8 +28,10 @@ public interface ThriftServerMetricsSourceFactory { static final String THRIFT_TWO_METRICS_CONTEXT = "thrift-two"; static final String THRIFT_TWO_JMX_CONTEXT = "Thrift,sub=ThriftTwo"; - ThriftServerMetricsSource createThriftOneSource(); + /** Create a Source for a thrift one server */ + MetricsThriftServerSource createThriftOneSource(); - ThriftServerMetricsSource createThriftTwoSource(); + /** Create a Source for a thrift two server */ + MetricsThriftServerSource createThriftTwoSource(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics/MetricHistogram.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java similarity index 82% rename from hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics/MetricHistogram.java rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java index c5d6e491d38..f431632a170 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics/MetricHistogram.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java @@ -16,13 +16,15 @@ * limitations under the License. */ -package org.apache.hadoop.metrics; +package org.apache.hadoop.metrics2; /** - * + * Metrics Histogram interface. Implementing classes will expose computed + * percentile values through the metrics system. */ public interface MetricHistogram { + //Strings used to create metrics names. static final String NUM_OPS_METRIC_NAME = "_num_ops"; static final String MIN_METRIC_NAME = "_min"; static final String MAX_METRIC_NAME = "_max"; @@ -32,6 +34,10 @@ public interface MetricHistogram { static final String NINETY_FIFTH_PERCENTILE_METRIC_NAME = "_95th_percentile"; static final String NINETY_NINETH_PERCENTILE_METRIC_NAME = "_99th_percentile"; + /** + * Add a single value to a histogram's stream of values.
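 + * Each value is folded into the count, min, max and percentile metrics named above.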
+ * @param value + */ void add(long value); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics/MetricsExecutor.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java similarity index 92% rename from hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics/MetricsExecutor.java rename to hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java index 4094922c3eb..f2ebc94d01e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics/MetricsExecutor.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java @@ -16,12 +16,12 @@ * limitations under the License. */ -package org.apache.hadoop.metrics; +package org.apache.hadoop.metrics2; import java.util.concurrent.ScheduledExecutorService; /** - * + * ScheduledExecutorService for metrics. */ public interface MetricsExecutor { diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java similarity index 78% rename from hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceFactory.java rename to hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java index 9f28c491f79..3d83975c10e 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java @@ -16,20 +16,21 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.master.metrics; +package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.master.MetricsMasterSource; import org.junit.Test; /** - * Test for the CompatibilitySingletonFactory and building MasterMetricsSource + * Test for the CompatibilitySingletonFactory and building MetricsMasterSource */ -public class TestMasterMetricsSourceFactory { +public class TestMetricsMasterSourceFactory { @Test(expected=RuntimeException.class) public void testGetInstanceNoHadoopCompat() throws Exception { //This should throw an exception because there is no compat lib on the class path. - CompatibilitySingletonFactory.getInstance(MasterMetricsSource.class); + CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java new file mode 100644 index 00000000000..1326b858e3b --- /dev/null +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory; +import org.junit.Test; + +/** + * Test for the CompatibilitySingletonFactory and building MetricsRegionServerSource + */ +public class TestMetricsRegionServerSourceFactory { + + @Test(expected=RuntimeException.class) + public void testGetInstanceNoHadoopCompat() throws Exception { + //This should throw an exception because there is no compat lib on the class path. + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); + + } +} diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java similarity index 76% rename from hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceFactory.java rename to hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java index 9378dff80bc..637b6f79f2e 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java @@ -16,19 +16,20 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.replication.regionserver.metrics; +package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource; import org.junit.Test; /** - * Test for the CompatibilitySingletonFactory and building ReplicationMetricsSource + * Test for the CompatibilitySingletonFactory and building MetricsReplicationSource */ -public class TestReplicationMetricsSourceFactory { +public class TestMetricsReplicationSourceFactory { @Test(expected=RuntimeException.class) public void testGetInstanceNoHadoopCompat() throws Exception { //This should throw an exception because there is no compat lib on the class path. 
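 //CompatibilitySingletonFactory looks the implementation up from the compat jars on the classpath.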
- CompatibilitySingletonFactory.getInstance(ReplicationMetricsSource.class); + CompatibilitySingletonFactory.getInstance(MetricsReplicationSource.class); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSource.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java similarity index 85% rename from hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSource.java rename to hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java index e3f18f783f7..0691fa12962 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSource.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java @@ -16,21 +16,22 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.rest.metrics; +package org.apache.hadoop.hbase.rest; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.rest.MetricsRESTSource; import org.junit.Test; /** * Test of Rest Metrics Source interface. */ -public class TestRESTMetricsSource { +public class TestMetricsRESTSource { @Test(expected=RuntimeException.class) public void testGetInstanceNoHadoopCompat() throws Exception { //This should throw an exception because there is no compat lib on the class path. - CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class); + CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java index fc668bf8398..968ab83ac1b 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.test; -import org.apache.hadoop.hbase.metrics.BaseMetricsSource; +import org.apache.hadoop.hbase.metrics.BaseSource; /** Interface of a class to make assertions about metrics values. */ public interface MetricsAssertHelper { @@ -28,128 +28,128 @@ public interface MetricsAssertHelper { * * @param name The name of the tag. * @param expected The expected value - * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags, + * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. */ - public void assertTag(String name, String expected, BaseMetricsSource source); + public void assertTag(String name, String expected, BaseSource source); /** * Assert that a gauge exists and that it's value is equal to the expected value. * * @param name The name of the gauge * @param expected The expected value of the gauge. - * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags, + * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. 
*/ - public void assertGauge(String name, long expected, BaseMetricsSource source); + public void assertGauge(String name, long expected, BaseSource source); /** * Assert that a gauge exists and it's value is greater than a given value * * @param name The name of the gauge * @param expected Value that the gauge is expected to be greater than - * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags, + * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. */ - public void assertGaugeGt(String name, long expected, BaseMetricsSource source); + public void assertGaugeGt(String name, long expected, BaseSource source); /** * Assert that a gauge exists and it's value is less than a given value * * @param name The name of the gauge * @param expected Value that the gauge is expected to be less than - * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags, + * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. */ - public void assertGaugeLt(String name, long expected, BaseMetricsSource source); + public void assertGaugeLt(String name, long expected, BaseSource source); /** * Assert that a gauge exists and that it's value is equal to the expected value. * * @param name The name of the gauge * @param expected The expected value of the gauge. - * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags, + * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. */ - public void assertGauge(String name, double expected, BaseMetricsSource source); + public void assertGauge(String name, double expected, BaseSource source); /** * Assert that a gauge exists and it's value is greater than a given value * * @param name The name of the gauge * @param expected Value that the gauge is expected to be greater than - * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags, + * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. */ - public void assertGaugeGt(String name, double expected, BaseMetricsSource source); + public void assertGaugeGt(String name, double expected, BaseSource source); /** * Assert that a gauge exists and it's value is less than a given value * * @param name The name of the gauge * @param expected Value that the gauge is expected to be less than - * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags, + * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. */ - public void assertGaugeLt(String name, double expected, BaseMetricsSource source); + public void assertGaugeLt(String name, double expected, BaseSource source); /** * Assert that a counter exists and that it's value is equal to the expected value. * * @param name The name of the counter. * @param expected The expected value - * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags, + * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. */ - public void assertCounter(String name, long expected, BaseMetricsSource source); + public void assertCounter(String name, long expected, BaseSource source); /** * Assert that a counter exists and that it's value is greater than the given value. * * @param name The name of the counter. 
* @param expected The value the counter is expected to be greater than. - * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags, + * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. */ - public void assertCounterGt(String name, long expected, BaseMetricsSource source); + public void assertCounterGt(String name, long expected, BaseSource source); /** * Assert that a counter exists and that it's value is less than the given value. * * @param name The name of the counter. * @param expected The value the counter is expected to be less than. - * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags, + * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. */ - public void assertCounterLt(String name, long expected, BaseMetricsSource source); + public void assertCounterLt(String name, long expected, BaseSource source); /** * Get the value of a counter. * * @param name name of the counter. - * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags, + * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. * @return long value of the counter. */ - public long getCounter(String name, BaseMetricsSource source); + public long getCounter(String name, BaseSource source); /** * Get the value of a gauge as a double. * * @param name name of the gauge. - * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags, + * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. * @return double value of the gauge. */ - public double getGaugeDouble(String name, BaseMetricsSource source); + public double getGaugeDouble(String name, BaseSource source); /** * Get the value of a gauge as a long. * * @param name name of the gauge. - * @param source The BaseMetricsSource{@link BaseMetricsSource} that will provide the tags, + * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. * @return long value of the gauge. */ - public long getGaugeLong(String name, BaseMetricsSource source); + public long getGaugeLong(String name, BaseSource source); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java similarity index 78% rename from hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactory.java rename to hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java index b1f253e1db5..bd132bbf760 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java @@ -16,21 +16,22 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hbase.thrift.metrics; +package org.apache.hadoop.hbase.thrift; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory; import org.junit.Test; /** - * Test for the interface of ThriftServerMetricsSourceFactory + * Test for the interface of MetricsThriftServerSourceFactory */ -public class TestThriftServerMetricsSourceFactory { +public class TestMetricsThriftServerSourceFactory { @Test(expected=RuntimeException.class) public void testGetInstanceNoHadoopCompat() throws RuntimeException { //This should throw an exception because there is no compat lib on the class path. - CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class); + CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class); } } diff --git a/hbase-hadoop1-compat/pom.xml b/hbase-hadoop1-compat/pom.xml index eacde23a3f3..6b554716208 100644 --- a/hbase-hadoop1-compat/pom.xml +++ b/hbase-hadoop1-compat/pom.xml @@ -97,6 +97,10 @@ limitations under the License. com.yammer.metrics metrics-core + + log4j + log4j + org.apache.hadoop hadoop-test diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java similarity index 63% rename from hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryImpl.java rename to hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java index 4a170462127..350c39d9152 100644 --- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryImpl.java +++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java @@ -16,22 +16,22 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hbase.master.metrics; +package org.apache.hadoop.hbase.master; /** - * Factory to create MasterMetricsSource when given a MasterMetricsWrapper + * Factory to create MetricsMasterSource when given a MetricsMasterWrapper */ -public class MasterMetricsSourceFactoryImpl implements MasterMetricsSourceFactory { +public class MetricsMasterSourceFactoryImpl implements MetricsMasterSourceFactory { private static enum FactoryStorage { INSTANCE; - MasterMetricsSource source; + MetricsMasterSource masterSource; } @Override - public synchronized MasterMetricsSource create(MasterMetricsWrapper beanWrapper) { - if (FactoryStorage.INSTANCE.source == null ) { - FactoryStorage.INSTANCE.source = new MasterMetricsSourceImpl(beanWrapper); + public synchronized MetricsMasterSource create(MetricsMasterWrapper masterWrapper) { + if (FactoryStorage.INSTANCE.masterSource == null) { + FactoryStorage.INSTANCE.masterSource = new MetricsMasterSourceImpl(masterWrapper); } - return FactoryStorage.INSTANCE.source; + return FactoryStorage.INSTANCE.masterSource; } } diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java similarity index 79% rename from hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java rename to hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java index 85c7373b89a..b00fec50160 100644 --- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java +++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java @@ -16,41 +16,42 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.master.metrics; +package org.apache.hadoop.hbase.master; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl; +import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.MetricsBuilder; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong; import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong; import org.apache.hadoop.metrics2.lib.MetricMutableHistogram; -/** Hadoop1 implementation of MasterMetricsSource. */ -public class MasterMetricsSourceImpl - extends BaseMetricsSourceImpl implements MasterMetricsSource { +/** + * Hadoop1 implementation of MetricsMasterSource. 
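+ * Registers itself with the metrics system in the BaseSourceImpl constructor, under the name and context constants defined on MetricsMasterSource.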
+ */ +public class MetricsMasterSourceImpl + extends BaseSourceImpl implements MetricsMasterSource { - private static final Log LOG = LogFactory.getLog(MasterMetricsSourceImpl.class.getName()); + private static final Log LOG = LogFactory.getLog(MetricsMasterSourceImpl.class.getName()); - MetricMutableCounterLong clusterRequestsCounter; - MetricMutableGaugeLong ritGauge; - MetricMutableGaugeLong ritCountOverThresholdGauge; - MetricMutableGaugeLong ritOldestAgeGauge; - - private final MasterMetricsWrapper masterWrapper; + private final MetricsMasterWrapper masterWrapper; + private MetricMutableCounterLong clusterRequestsCounter; + private MetricMutableGaugeLong ritGauge; + private MetricMutableGaugeLong ritCountOverThresholdGauge; + private MetricMutableGaugeLong ritOldestAgeGauge; private MetricMutableHistogram splitTimeHisto; private MetricMutableHistogram splitSizeHisto; - public MasterMetricsSourceImpl(MasterMetricsWrapper masterWrapper) { + public MetricsMasterSourceImpl(MetricsMasterWrapper masterWrapper) { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, masterWrapper); } - public MasterMetricsSourceImpl(String metricsName, + public MetricsMasterSourceImpl(String metricsName, String metricsDescription, String metricsContext, String metricsJmxContext, - MasterMetricsWrapper masterWrapper) { + MetricsMasterWrapper masterWrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.masterWrapper = masterWrapper; } @@ -102,15 +103,15 @@ public class MasterMetricsSourceImpl public void getMetrics(MetricsBuilder metricsBuilder, boolean all) { MetricsRecordBuilder metricsRecordBuilder = metricsBuilder.addRecord(metricsName) - .setContext(metricsContext); + .setContext(metricsContext); // masterWrapper can be null because this function is called inside of init. 
if (masterWrapper != null) { metricsRecordBuilder .addGauge(MASTER_ACTIVE_TIME_NAME, - MASTER_ACTIVE_TIME_DESC, masterWrapper.getMasterActiveTime()) + MASTER_ACTIVE_TIME_DESC, masterWrapper.getActiveTime()) .addGauge(MASTER_START_TIME_NAME, - MASTER_START_TIME_DESC, masterWrapper.getMasterStartTime()) + MASTER_START_TIME_DESC, masterWrapper.getStartTime()) .addGauge(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC, masterWrapper.getAverageLoad()) .addGauge(NUM_REGION_SERVERS_NAME, NUMBER_OF_REGION_SERVERS_DESC, masterWrapper.getRegionServers()) @@ -125,7 +126,7 @@ public class MasterMetricsSourceImpl String.valueOf(masterWrapper.getIsActiveMaster())); } - metricsRegistry.snapshot(metricsRecordBuilder, true); + metricsRegistry.snapshot(metricsRecordBuilder, all); } } diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java similarity index 80% rename from hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java rename to hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java index 0943370b0d6..857768cf5e6 100644 --- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java +++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java @@ -19,19 +19,16 @@ package org.apache.hadoop.hbase.metrics; import org.apache.hadoop.metrics2.MetricsBuilder; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsSource; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry; -import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong; -import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong; -import org.apache.hadoop.metrics2.lib.MetricMutableHistogram; -import org.apache.hadoop.metrics2.lib.MetricMutableQuantiles; +import org.apache.hadoop.metrics2.impl.JmxCacheBuster; +import org.apache.hadoop.metrics2.lib.*; import org.apache.hadoop.metrics2.source.JvmMetricsSource; /** - * Hadoop 1 implementation of BaseMetricsSource (using metrics2 framework) + * Hadoop 1 implementation of BaseSource (using metrics2 framework) */ -public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource { +public class BaseSourceImpl implements BaseSource, MetricsSource { private static enum DefaultMetricsSystemInitializer { INSTANCE; @@ -46,8 +43,6 @@ public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource { } } - private static boolean defaultMetricsSystemInited = false; - public static final String HBASE_METRICS_SYSTEM_NAME = "hbase"; protected final DynamicMetricsRegistry metricsRegistry; protected final String metricsName; @@ -55,7 +50,7 @@ public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource { protected final String metricsContext; protected final String metricsJmxContext; - public BaseMetricsSourceImpl( + public BaseSourceImpl( String metricsName, String metricsDescription, String metricsContext, @@ -137,22 +132,15 @@ public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource { } /** - * Remove a named gauge. + * Remove a named metric. * * @param key */ - public void removeGauge(String key) { + public void removeMetric(String key) { metricsRegistry.removeMetric(key); + JmxCacheBuster.clearJmxCache(); } - /** - * Remove a named counter. 
- * - * @param key - */ - public void removeCounter(String key) { - metricsRegistry.removeMetric(key); - } /** * Method to export all the metrics. @@ -162,14 +150,16 @@ public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource { */ @Override public void getMetrics(MetricsBuilder metricsBuilder, boolean all) { - metricsRegistry.snapshot(metricsBuilder.addRecord(metricsRegistry.name()), all); + MetricsRecordBuilder mrb = metricsBuilder.addRecord(metricsName) + .setContext(metricsContext); + metricsRegistry.snapshot(mrb, all); } /** * Used to get at the DynamicMetricsRegistry. * @return DynamicMetricsRegistry */ - protected DynamicMetricsRegistry getMetricsRegistry() { + public DynamicMetricsRegistry getMetricsRegistry() { return metricsRegistry; } } diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java new file mode 100644 index 00000000000..658deb4e091 --- /dev/null +++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.metrics.BaseSourceImpl; +import org.apache.hadoop.metrics2.MetricsBuilder; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; + +import java.util.TreeSet; + +public class MetricsRegionAggregateSourceImpl extends BaseSourceImpl + implements MetricsRegionAggregateSource { + private final Log LOG = LogFactory.getLog(this.getClass()); + + private final TreeSet regionSources = + new TreeSet(); + + public MetricsRegionAggregateSourceImpl() { + this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); + } + + + public MetricsRegionAggregateSourceImpl(String metricsName, + String metricsDescription, + String metricsContext, + String metricsJmxContext) { + super(metricsName, metricsDescription, metricsContext, metricsJmxContext); + } + + @Override + public void register(MetricsRegionSource source) { + regionSources.add((MetricsRegionSourceImpl) source); + } + + @Override + public void deregister(MetricsRegionSource source) { + regionSources.remove(source); + } + + /** + * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all + * expectations of java programmers. Instead of returning anything Hadoop metrics expects + * getMetrics to push the metrics into the metricsBuilder. + * + * @param metricsBuilder Builder to accept metrics + * @param all push all or only changed? 
+ */ + @Override + public void getMetrics(MetricsBuilder metricsBuilder, boolean all) { + + + MetricsRecordBuilder mrb = metricsBuilder.addRecord(metricsName) + .setContext(metricsContext); + + if (regionSources != null) { + for (MetricsRegionSourceImpl regionMetricSource : regionSources) { + regionMetricSource.snapshot(mrb, all); + } + } + + + metricsRegistry.snapshot(mrb, all); + } +} diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java new file mode 100644 index 00000000000..dc4ae6abc7c --- /dev/null +++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +/** + * Factory to create MetricsRegionServerSource when given a MetricsRegionServerWrapper + */ +public class MetricsRegionServerSourceFactoryImpl implements MetricsRegionServerSourceFactory { + private static enum FactoryStorage { + INSTANCE; + private MetricsRegionServerSource serverSource; + private MetricsRegionAggregateSourceImpl aggImpl; + } + + private synchronized MetricsRegionAggregateSourceImpl getAggregate() { + if (FactoryStorage.INSTANCE.aggImpl == null) { + FactoryStorage.INSTANCE.aggImpl = new MetricsRegionAggregateSourceImpl(); + } + return FactoryStorage.INSTANCE.aggImpl; + } + + + @Override + public synchronized MetricsRegionServerSource createServer(MetricsRegionServerWrapper regionServerWrapper) { + if (FactoryStorage.INSTANCE.serverSource == null) { + FactoryStorage.INSTANCE.serverSource = new MetricsRegionServerSourceImpl( + regionServerWrapper); + } + return FactoryStorage.INSTANCE.serverSource; + } + + @Override + public MetricsRegionSource createRegion(MetricsRegionWrapper wrapper) { + return new MetricsRegionSourceImpl(wrapper, getAggregate()); + } +} diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java new file mode 100644 index 00000000000..cffb1c14abb --- /dev/null +++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java @@ -0,0 +1,161 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
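The aggregate source and factory above implement a funnel: hadoop1's metrics system offers no cheap way to register and unregister a source per region, so each MetricsRegionSourceImpl registers itself with one shared aggregate, which snapshots every region into a single record under per-region key prefixes. A sketch of the shape, with the metrics2 types replaced by a small interface so it stands alone:

import java.util.Set;
import java.util.TreeSet;

// Sketch of the aggregate funnel; Recorder and RegionSource are simplified
// stand-ins for MetricsRecordBuilder and MetricsRegionSource.
public class AggregateSketch {
  interface Recorder { void addGauge(String name, long value); }

  interface RegionSource extends Comparable<RegionSource> {
    void snapshot(Recorder recorder, boolean all);
  }

  // TreeSet uses compareTo (ordering by region name) to order and
  // de-duplicate the registered sources.
  private final Set<RegionSource> regionSources = new TreeSet<RegionSource>();

  public synchronized void register(RegionSource source) { regionSources.add(source); }
  public synchronized void deregister(RegionSource source) { regionSources.remove(source); }

  // The metrics system sees one source; a single getMetrics pass walks
  // every online region and appends its prefixed metrics.
  public void getMetrics(Recorder recorder, boolean all) {
    for (RegionSource regionSource : regionSources) {
      regionSource.snapshot(recorder, all);
    }
  }
}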
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.metrics.BaseSourceImpl; +import org.apache.hadoop.metrics2.MetricHistogram; +import org.apache.hadoop.metrics2.MetricsBuilder; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; + +/** + * Hadoop1 implementation of MetricsRegionServerSource. + */ +public class MetricsRegionServerSourceImpl + extends BaseSourceImpl implements MetricsRegionServerSource { + + final MetricsRegionServerWrapper rsWrap; + private final MetricHistogram putHisto; + private final MetricHistogram deleteHisto; + private final MetricHistogram getHisto; + private final MetricHistogram incrementHisto; + private final MetricHistogram appendHisto; + + public MetricsRegionServerSourceImpl(MetricsRegionServerWrapper rsWrap) { + this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, rsWrap); + } + + public MetricsRegionServerSourceImpl(String metricsName, + String metricsDescription, + String metricsContext, + String metricsJmxContext, + MetricsRegionServerWrapper rsWrap) { + super(metricsName, metricsDescription, metricsContext, metricsJmxContext); + this.rsWrap = rsWrap; + + putHisto = getMetricsRegistry().getHistogram(PUT_KEY); + deleteHisto = getMetricsRegistry().getHistogram(DELETE_KEY); + getHisto = getMetricsRegistry().getHistogram(GET_KEY); + incrementHisto = getMetricsRegistry().getHistogram(INCREMENT_KEY); + appendHisto = getMetricsRegistry().getHistogram(APPEND_KEY); + } + + @Override + public void init() { + super.init(); + } + + @Override + public void updatePut(long t) { + putHisto.add(t); + } + + @Override + public void updateDelete(long t) { + deleteHisto.add(t); + } + + @Override + public void updateGet(long t) { + getHisto.add(t); + } + + @Override + public void updateIncrement(long t) { + incrementHisto.add(t); + } + + @Override + public void updateAppend(long t) { + appendHisto.add(t); + } + + /** + * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all + * expectations of java programmers. Instead of returning anything Hadoop metrics expects + * getMetrics to push the metrics into the metricsBuilder. + * + * @param metricsBuilder Builder to accept metrics + * @param all push all or only changed? + */ + @Override + public void getMetrics(MetricsBuilder metricsBuilder, boolean all) { + + MetricsRecordBuilder mrb = metricsBuilder.addRecord(metricsName) + .setContext(metricsContext); + + // rsWrap can be null because this function is called inside of init. 
+ if (rsWrap != null) { + mrb.addGauge(REGION_COUNT, REGION_COUNT_DESC, rsWrap.getNumOnlineRegions()) + .addGauge(STORE_COUNT, STORE_COUNT_DESC, rsWrap.getNumStores()) + .addGauge(STOREFILE_COUNT, STOREFILE_COUNT_DESC, rsWrap.getNumStoreFiles()) + .addGauge(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC, rsWrap.getMemstoreSize()) + .addGauge(STOREFILE_SIZE, STOREFILE_SIZE_DESC, rsWrap.getStoreFileSize()) + .addGauge(RS_START_TIME_NAME, RS_START_TIME_DESC, rsWrap.getStartCode()) + .addCounter(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC, rsWrap.getTotalRequestCount()) + .addCounter(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC, rsWrap.getReadRequestsCount()) + .addCounter(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC, rsWrap.getWriteRequestsCount()) + .addCounter(CHECK_MUTATE_FAILED_COUNT, + CHECK_MUTATE_FAILED_COUNT_DESC, + rsWrap.getCheckAndMutateChecksFailed()) + .addCounter(CHECK_MUTATE_PASSED_COUNT, + CHECK_MUTATE_PASSED_COUNT_DESC, + rsWrap.getCheckAndMutateChecksPassed()) + .addGauge(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC, rsWrap.getStoreFileIndexSize()) + .addGauge(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC, rsWrap.getTotalStaticIndexSize()) + .addGauge(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC, rsWrap.getTotalStaticBloomSize()) + .addGauge(NUMBER_OF_PUTS_WITHOUT_WAL, + NUMBER_OF_PUTS_WITHOUT_WAL_DESC, + rsWrap.getNumPutsWithoutWAL()) + .addGauge(DATA_SIZE_WITHOUT_WAL, + DATA_SIZE_WITHOUT_WAL_DESC, + rsWrap.getDataInMemoryWithoutWAL()) + .addGauge(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC, rsWrap.getPercentFileLocal()) + .addGauge(COMPACTION_QUEUE_LENGTH, + COMPACTION_QUEUE_LENGTH_DESC, + rsWrap.getCompactionQueueSize()) + .addGauge(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC, rsWrap.getFlushQueueSize()) + .addGauge(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC, rsWrap.getBlockCacheFreeSize()) + .addGauge(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC, rsWrap.getBlockCacheCount()) + .addGauge(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC, rsWrap.getBlockCacheSize()) + .addCounter(BLOCK_CACHE_HIT_COUNT, + BLOCK_CACHE_HIT_COUNT_DESC, + rsWrap.getBlockCacheHitCount()) + .addCounter(BLOCK_CACHE_MISS_COUNT, + BLOCK_COUNT_MISS_COUNT_DESC, + rsWrap.getBlockCacheMissCount()) + .addCounter(BLOCK_CACHE_EVICTION_COUNT, + BLOCK_CACHE_EVICTION_COUNT_DESC, + rsWrap.getBlockCacheEvictedCount()) + .addGauge(BLOCK_CACHE_HIT_PERCENT, + BLOCK_CACHE_HIT_PERCENT_DESC, + rsWrap.getBlockCacheHitPercent()) + .addGauge(BLOCK_CACHE_EXPRESS_HIT_PERCENT, + BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC, + rsWrap.getBlockCacheHitCachingPercent()) + .addCounter(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC, rsWrap.getUpdatesBlockedTime()) + .tag(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC, rsWrap.getZookeeperQuorum()) + .tag(SERVER_NAME_NAME, SERVER_NAME_DESC, rsWrap.getServerName()) + .tag(CLUSTER_ID_NAME, CLUSTER_ID_DESC, rsWrap.getClusterId()); + } + + metricsRegistry.snapshot(mrb, all); + } + + +} diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java new file mode 100644 index 00000000000..ea44d3a7d1c --- /dev/null +++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java @@ -0,0 +1,163 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
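The update* methods of the region server source above are the write path for the per-operation latency histograms. A hedged usage sketch; timedPut() and doPut() are hypothetical names, and the millisecond unit is an assumption about how callers measure elapsed time:

// Hypothetical caller in org.apache.hadoop.hbase.regionserver: time an
// operation and feed the elapsed time into the source, which records it in
// the matching MetricHistogram.
class PutTimingSketch {
  void timedPut(MetricsRegionServerSource serverSource) {
    long start = System.currentTimeMillis();
    doPut(); // stand-in for the real put path
    serverSource.updatePut(System.currentTimeMillis() - start);
  }

  void doPut() {
    // no-op placeholder for illustration
  }
}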
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; +import org.apache.hadoop.metrics2.impl.JmxCacheBuster; +import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry; +import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong; + +public class MetricsRegionSourceImpl implements MetricsRegionSource { + + private final MetricsRegionWrapper regionWrapper; + private boolean closed = false; + private MetricsRegionAggregateSourceImpl agg; + private DynamicMetricsRegistry registry; + private static final Log LOG = LogFactory.getLog(MetricsRegionSourceImpl.class); + + private String regionNamePrefix; + private String regionPutKey; + private String regionDeleteKey; + private String regionGetKey; + private String regionIncrementKey; + private String regionAppendKey; + private MetricMutableCounterLong regionPut; + private MetricMutableCounterLong regionDelete; + private MetricMutableCounterLong regionGet; + private MetricMutableCounterLong regionIncrement; + private MetricMutableCounterLong regionAppend; + + public MetricsRegionSourceImpl(MetricsRegionWrapper regionWrapper, + MetricsRegionAggregateSourceImpl aggregate) { + this.regionWrapper = regionWrapper; + agg = aggregate; + agg.register(this); + + LOG.debug("Creating new MetricsRegionSourceImpl for table " + + regionWrapper.getTableName() + + " " + + regionWrapper.getRegionName()); + + registry = agg.getMetricsRegistry(); + + regionNamePrefix = "table." + regionWrapper.getTableName() + "." + + "region." 
+ regionWrapper.getRegionName() + "."; + + String suffix = "Count"; + + + regionPutKey = regionNamePrefix + MetricsRegionServerSource.PUT_KEY + suffix; + regionPut = registry.getLongCounter(regionPutKey, 0l); + + regionDeleteKey = regionNamePrefix + MetricsRegionServerSource.DELETE_KEY + suffix; + regionDelete = registry.getLongCounter(regionDeleteKey, 0l); + + regionGetKey = regionNamePrefix + MetricsRegionServerSource.GET_KEY + suffix; + regionGet = registry.getLongCounter(regionGetKey, 0l); + + regionIncrementKey = regionNamePrefix + MetricsRegionServerSource.INCREMENT_KEY + suffix; + regionIncrement = registry.getLongCounter(regionIncrementKey, 0l); + + regionAppendKey = regionNamePrefix + MetricsRegionServerSource.APPEND_KEY + suffix; + regionAppend = registry.getLongCounter(regionAppendKey, 0l); + } + + @Override + public void close() { + closed = true; + agg.deregister(this); + + LOG.trace("Removing region Metrics: " + regionWrapper.getRegionName()); + registry.removeMetric(regionPutKey); + registry.removeMetric(regionDeleteKey); + registry.removeMetric(regionGetKey); + registry.removeMetric(regionIncrementKey); + + registry.removeMetric(regionAppendKey); + + JmxCacheBuster.clearJmxCache(); + } + + @Override + public void updatePut() { + regionPut.incr(); + } + + @Override + public void updateDelete() { + regionDelete.incr(); + } + + @Override + public void updateGet() { + regionGet.incr(); + } + + @Override + public void updateIncrement() { + regionIncrement.incr(); + } + + @Override + public void updateAppend() { + regionAppend.incr(); + } + + @Override + public MetricsRegionAggregateSource getAggregateSource() { + return agg; + } + + @Override + public int compareTo(MetricsRegionSource source) { + + if (!(source instanceof MetricsRegionSourceImpl)) + return -1; + + MetricsRegionSourceImpl impl = (MetricsRegionSourceImpl) source; + return this.regionWrapper.getRegionName() + .compareTo(impl.regionWrapper.getRegionName()); + } + + void snapshot(MetricsRecordBuilder mrb, boolean ignored) { + if (closed) return; + + mrb.addGauge(regionNamePrefix + MetricsRegionServerSource.STORE_COUNT, + MetricsRegionServerSource.STORE_COUNT_DESC, + this.regionWrapper.getNumStores()); + mrb.addGauge(regionNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT, + MetricsRegionServerSource.STOREFILE_COUNT_DESC, + this.regionWrapper.getNumStoreFiles()); + mrb.addGauge(regionNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE, + MetricsRegionServerSource.MEMSTORE_SIZE_DESC, + this.regionWrapper.getMemstoreSize()); + mrb.addGauge(regionNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE, + MetricsRegionServerSource.STOREFILE_SIZE_DESC, + this.regionWrapper.getStoreFileSize()); + mrb.addCounter(regionNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT, + MetricsRegionServerSource.READ_REQUEST_COUNT_DESC, + this.regionWrapper.getReadRequestCount()); + mrb.addCounter(regionNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT, + MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC, + this.regionWrapper.getWriteRequestCount()); + + } +} diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java similarity index 75% rename from hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java rename to 
hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java index 0cb8cf9392d..d8da3b37933 100644 --- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java +++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java @@ -16,22 +16,22 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.replication.regionserver.metrics; +package org.apache.hadoop.hbase.replication.regionserver; -import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl; +import org.apache.hadoop.hbase.metrics.BaseSourceImpl; /** - * Hadoop1 implementation of ReplicationMetricsSource. This provides access to metrics gauges and + * Hadoop1 implementation of MetricsReplicationSource. This provides access to metrics gauges and * counters. */ -public class ReplicationMetricsSourceImpl extends BaseMetricsSourceImpl implements - ReplicationMetricsSource { +public class MetricsReplicationSourceImpl extends BaseSourceImpl implements + MetricsReplicationSource { - public ReplicationMetricsSourceImpl() { + public MetricsReplicationSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - ReplicationMetricsSourceImpl(String metricsName, + MetricsReplicationSourceImpl(String metricsName, String metricsDescription, String metricsContext, String metricsJmxContext) { diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSourceImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java similarity index 90% rename from hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSourceImpl.java rename to hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java index eff11dc94f2..c63aa0505b2 100644 --- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSourceImpl.java +++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java @@ -16,16 +16,16 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.rest.metrics; +package org.apache.hadoop.hbase.rest; -import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl; +import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong; /** * Hadoop One implementation of a metrics2 source that will export metrics from the Rest server to * the hadoop metrics2 subsystem. 
*/ -public class RESTMetricsSourceImpl extends BaseMetricsSourceImpl implements RESTMetricsSource { +public class MetricsRESTSourceImpl extends BaseSourceImpl implements MetricsRESTSource { private MetricMutableCounterLong request; private MetricMutableCounterLong sucGet; @@ -35,11 +35,11 @@ public class RESTMetricsSourceImpl extends BaseMetricsSourceImpl implements REST private MetricMutableCounterLong fPut; private MetricMutableCounterLong fDel; - public RESTMetricsSourceImpl() { + public MetricsRESTSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, CONTEXT, JMX_CONTEXT); } - public RESTMetricsSourceImpl(String metricsName, + public MetricsRESTSourceImpl(String metricsName, String metricsDescription, String metricsContext, String metricsJmxContext) { diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactoryImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java similarity index 75% rename from hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactoryImpl.java rename to hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java index 803c657752a..8762d65e088 100644 --- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactoryImpl.java +++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java @@ -16,13 +16,13 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.thrift.metrics; +package org.apache.hadoop.hbase.thrift; /** * Class used to create metrics sources for Thrift and Thrift2 servers in hadoop 1's compat * library. 
*/ -public class ThriftServerMetricsSourceFactoryImpl implements ThriftServerMetricsSourceFactory { +public class MetricsThriftServerSourceFactoryImpl implements MetricsThriftServerSourceFactory { /** * A singleton used to make sure that only one thrift metrics source per server type is ever @@ -30,23 +30,23 @@ public class ThriftServerMetricsSourceFactoryImpl implements ThriftServerMetrics */ private static enum FactoryStorage { INSTANCE; - ThriftServerMetricsSourceImpl thriftOne = new ThriftServerMetricsSourceImpl(METRICS_NAME, + MetricsThriftServerSourceImpl thriftOne = new MetricsThriftServerSourceImpl(METRICS_NAME, METRICS_DESCRIPTION, THRIFT_ONE_METRICS_CONTEXT, THRIFT_ONE_JMX_CONTEXT); - ThriftServerMetricsSourceImpl thriftTwo = new ThriftServerMetricsSourceImpl(METRICS_NAME, + MetricsThriftServerSourceImpl thriftTwo = new MetricsThriftServerSourceImpl(METRICS_NAME, METRICS_DESCRIPTION, THRIFT_TWO_METRICS_CONTEXT, THRIFT_TWO_JMX_CONTEXT); } @Override - public ThriftServerMetricsSource createThriftOneSource() { + public MetricsThriftServerSource createThriftOneSource() { return FactoryStorage.INSTANCE.thriftOne; } @Override - public ThriftServerMetricsSource createThriftTwoSource() { + public MetricsThriftServerSource createThriftTwoSource() { return FactoryStorage.INSTANCE.thriftTwo; } } diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java similarity index 86% rename from hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceImpl.java rename to hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java index 7e5d0c4b6a2..6d57186eb7a 100644 --- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceImpl.java +++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java @@ -16,18 +16,17 @@ * limitations under the License. 
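Unlike the master and region server factories, the Thrift factory above builds both sources eagerly inside the enum constants, so its create methods need neither synchronization nor a null check. A minimal sketch of that eager variant, with Object standing in for the two Thrift sources:

public class EagerHolderSketch {
  private static enum Storage {
    INSTANCE;
    // Built once, when the enum class initializes; accessors are lock-free.
    private final Object thriftOne = new Object();
    private final Object thriftTwo = new Object();
  }

  public static Object createThriftOneSource() { return Storage.INSTANCE.thriftOne; }
  public static Object createThriftTwoSource() { return Storage.INSTANCE.thriftTwo; }
}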
*/ -package org.apache.hadoop.hbase.thrift.metrics; +package org.apache.hadoop.hbase.thrift; -import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl; -import org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSource; +import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.lib.MetricMutableGaugeLong; import org.apache.hadoop.metrics2.lib.MetricMutableStat; /** - * Hadoop 1 version of ThriftServerMetricsSource{@link ThriftServerMetricsSource} + * Hadoop 1 version of {@link MetricsThriftServerSource} */ -public class ThriftServerMetricsSourceImpl extends BaseMetricsSourceImpl implements - ThriftServerMetricsSource { +public class MetricsThriftServerSourceImpl extends BaseSourceImpl implements + MetricsThriftServerSource { private MetricMutableStat batchGetStat; @@ -39,7 +38,7 @@ public class ThriftServerMetricsSourceImpl extends BaseMetricsSourceImpl impleme private MetricMutableGaugeLong callQueueLenGauge; - public ThriftServerMetricsSourceImpl(String metricsName, + public MetricsThriftServerSourceImpl(String metricsName, String metricsDescription, String metricsContext, String metricsJmxContext) { diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java new file mode 100644 index 00000000000..1e2eb2f4fa6 --- /dev/null +++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.metrics2.impl; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; + +/** + * JMX caches the beans that have been exported; even after the values are removed from hadoop's + * metrics system the keys and old values will still remain. This class stops and restarts the + * Hadoop metrics system, forcing JMX to clear the cache of exported metrics. + * + * This class needs to be in the o.a.h.metrics2.impl namespace as many of the variables/calls used + * are package private. + */ +public class JmxCacheBuster { + private static final Log LOG = LogFactory.getLog(JmxCacheBuster.class); + + /** + * Forces JMX to forget about all previously exported metrics. + */ + public static void clearJmxCache() { + LOG.trace("Clearing JMX mbean cache."); + + // This is pretty extreme but it's the best way that + // I could find to get metrics to be removed.
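+ // (Stopping and restarting the DefaultMetricsSystem makes it unregister and
+ // re-register its MBeans, so JMX drops its cached view of metrics that have
+ // since been removed from the registry.)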
+ + try { + DefaultMetricsSystem.INSTANCE.stop(); + DefaultMetricsSystem.INSTANCE.start(); + } catch (Exception exception) { + LOG.debug("Error clearing the JMX cache; it appears the metrics system hasn't been started", exception); + } + } +} diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java index 04fb2a96dd5..3f0bc47eecf 100644 --- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java +++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java @@ -23,6 +23,8 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.metrics2.MetricsException; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsTag; @@ -39,6 +41,8 @@ import org.apache.hadoop.metrics2.MetricsTag; */ public class DynamicMetricsRegistry { + private final Log LOG = LogFactory.getLog(this.getClass()); + /** key for the context tag */ public static final String CONTEXT_KEY = "context"; /** description for the context tag */ @@ -284,6 +288,7 @@ public class DynamicMetricsRegistry { * @param all get all the metrics even if the values are not changed. */ public void snapshot(MetricsRecordBuilder builder, boolean all) { + for (Entry entry : tags()) { builder.add(entry.getValue()); } diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableHistogram.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableHistogram.java index 166af08d8f9..b7c24dd0e38 100644 --- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableHistogram.java +++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableHistogram.java @@ -21,9 +21,8 @@ package org.apache.hadoop.metrics2.lib; import com.yammer.metrics.stats.ExponentiallyDecayingSample; import com.yammer.metrics.stats.Sample; import com.yammer.metrics.stats.Snapshot; -import org.apache.hadoop.metrics.MetricHistogram; +import org.apache.hadoop.metrics2.MetricHistogram; import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.lib.MetricMutable; import java.util.concurrent.atomic.AtomicLong; diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java index 7f4b71b9236..e80095f96fb 100644 --- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java +++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java @@ -20,8 +20,8 @@ package org.apache.hadoop.metrics2.lib; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.metrics.MetricHistogram; -import org.apache.hadoop.metrics.MetricsExecutor; +import org.apache.hadoop.metrics2.MetricHistogram; +import org.apache.hadoop.metrics2.MetricsExecutor; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.util.MetricQuantile; import org.apache.hadoop.metrics2.util.MetricSampleQuantiles; diff --git
a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java index 31357581891..d47912c273c 100644 --- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java +++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java @@ -18,7 +18,7 @@ package org.apache.hadoop.metrics2.lib; -import org.apache.hadoop.metrics.MetricsExecutor; +import org.apache.hadoop.metrics2.MetricsExecutor; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledThreadPoolExecutor; diff --git a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory new file mode 100644 index 00000000000..a5e43e4fcd2 --- /dev/null +++ b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory @@ -0,0 +1 @@ +org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl \ No newline at end of file diff --git a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactory b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactory deleted file mode 100644 index e81c3dcc43f..00000000000 --- a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactory +++ /dev/null @@ -1 +0,0 @@ -org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactoryImpl diff --git a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory new file mode 100644 index 00000000000..bc2f6430478 --- /dev/null +++ b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory @@ -0,0 +1 @@ +org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl diff --git a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource new file mode 100644 index 00000000000..1e0dd200e6f --- /dev/null +++ b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource @@ -0,0 +1 @@ +org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl \ No newline at end of file diff --git a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource deleted file mode 100644 index bb64ad5ba0d..00000000000 --- a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource +++ /dev/null @@ -1 +0,0 @@ -org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSourceImpl 
\ No newline at end of file diff --git a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource new file mode 100644 index 00000000000..5a4a8e9c044 --- /dev/null +++ b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource @@ -0,0 +1 @@ +org.apache.hadoop.hbase.rest.MetricsRESTSourceImpl \ No newline at end of file diff --git a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.metrics.RESTMetricsSource b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.metrics.RESTMetricsSource deleted file mode 100644 index 9e7a28d7b9b..00000000000 --- a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.metrics.RESTMetricsSource +++ /dev/null @@ -1 +0,0 @@ -org.apache.hadoop.hbase.rest.metrics.RESTMetricsSourceImpl \ No newline at end of file diff --git a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory new file mode 100644 index 00000000000..2b5c16338cd --- /dev/null +++ b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory @@ -0,0 +1 @@ +org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl \ No newline at end of file diff --git a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactory b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactory deleted file mode 100644 index 62d1c6a9325..00000000000 --- a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactory +++ /dev/null @@ -1 +0,0 @@ -org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactoryImpl \ No newline at end of file diff --git a/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor new file mode 100644 index 00000000000..dc120525ba8 --- /dev/null +++ b/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor @@ -0,0 +1 @@ +org.apache.hadoop.metrics2.lib.MetricsExecutorImpl diff --git a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceImpl.java b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java similarity index 60% rename from hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceImpl.java rename to hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java index fe384d7f405..4cdd60677fe 100644 --- a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceImpl.java +++ b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java @@ -16,26 +16,29 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hbase.master.metrics; +package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.master.MetricsMasterSource; +import org.apache.hadoop.hbase.master.MetricsMasterSourceFactory; +import org.apache.hadoop.hbase.master.MetricsMasterSourceImpl; import org.junit.Test; import static org.junit.Assert.assertSame; import static org.junit.Assert.assertTrue; /** - * Test for MasterMetricsSourceImpl + * Test for MetricsMasterSourceImpl */ -public class TestMasterMetricsSourceImpl { +public class TestMetricsMasterSourceImpl { @Test public void testGetInstance() throws Exception { - MasterMetricsSourceFactory masterMetricsSourceFactory = CompatibilitySingletonFactory - .getInstance(MasterMetricsSourceFactory.class); - MasterMetricsSource masterMetricsSource = masterMetricsSourceFactory.create(null); - assertTrue(masterMetricsSource instanceof MasterMetricsSourceImpl); - assertSame(masterMetricsSourceFactory, CompatibilitySingletonFactory.getInstance(MasterMetricsSourceFactory.class)); + MetricsMasterSourceFactory metricsMasterSourceFactory = CompatibilitySingletonFactory + .getInstance(MetricsMasterSourceFactory.class); + MetricsMasterSource masterSource = metricsMasterSourceFactory.create(null); + assertTrue(masterSource instanceof MetricsMasterSourceImpl); + assertSame(metricsMasterSourceFactory, CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class)); } } diff --git a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseMetricsSourceImplTest.java b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java similarity index 83% rename from hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseMetricsSourceImplTest.java rename to hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java index 095cb14e04b..400609bd6dc 100644 --- a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseMetricsSourceImplTest.java +++ b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java @@ -28,15 +28,15 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertSame; /** - * Test of the default BaseMetricsSource implementation for hadoop 1 + * Test of the default BaseSource implementation for hadoop 1 */ -public class TestBaseMetricsSourceImplTest { +public class TestBaseSourceImpl { - private static BaseMetricsSourceImpl bmsi; + private static BaseSourceImpl bmsi; @BeforeClass public static void setUp() throws Exception { - bmsi = new BaseMetricsSourceImpl("TestName", "test description", "testcontext", "TestContext"); + bmsi = new BaseSourceImpl("TestName", "test description", "testcontext", "TestContext"); } @Test @@ -81,17 +81,11 @@ public class TestBaseMetricsSourceImplTest { } @Test - public void testRemoveGauge() throws Exception { + public void testRemoveMetric() throws Exception { bmsi.setGauge("testrm", 100); - bmsi.removeGauge("testrm"); + bmsi.removeMetric("testrm"); assertNull(bmsi.metricsRegistry.get("testrm")); } - @Test - public void testRemoveCounter() throws Exception { - bmsi.incCounters("testrm", 100); - bmsi.removeCounter("testrm"); - assertNull(bmsi.metricsRegistry.get("testrm")); - } } diff --git a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java 
b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java new file mode 100644 index 00000000000..7509bf51dce --- /dev/null +++ b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.junit.Test; + +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +/** + * Test for MetricsRegionServerSourceImpl + */ +public class TestMetricsRegionServerSourceImpl { + + @Test + public void testGetInstance() throws Exception { + MetricsRegionServerSourceFactory metricsRegionServerSourceFactory = + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); + MetricsRegionServerSource serverSource = + metricsRegionServerSourceFactory.createServer(null); + assertTrue(serverSource instanceof MetricsRegionServerSourceImpl); + assertSame(metricsRegionServerSourceFactory, + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)); + } + + + @Test(expected = RuntimeException.class) + public void testNoGetRegionServerMetricsSourceImpl() throws Exception { + // This should throw an exception because MetricsRegionServerSourceImpl should only + // be created by a factory. + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceImpl.class); + } + +} diff --git a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java new file mode 100644 index 00000000000..89c0762e6d9 --- /dev/null +++ b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +public class TestMetricsRegionSourceImpl { + + @Test + public void testCompareTo() throws Exception { + MetricsRegionServerSourceFactory fact = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); + + MetricsRegionSource one = fact.createRegion(new RegionWrapperStub("TEST")); + MetricsRegionSource oneClone = fact.createRegion(new RegionWrapperStub("TEST")); + MetricsRegionSource two = fact.createRegion(new RegionWrapperStub("TWO")); + + assertEquals(0, one.compareTo(oneClone)); + + assertTrue(one.compareTo(two) < 0); + assertTrue(two.compareTo(one) > 0); + } + + + @Test(expected = RuntimeException.class) + public void testNoGetRegionServerMetricsSourceImpl() throws Exception { + // This should throw an exception because MetricsRegionSourceImpl should only + // be created by a factory. + CompatibilitySingletonFactory.getInstance(MetricsRegionSource.class); + } + + class RegionWrapperStub implements MetricsRegionWrapper { + + private String regionName; + + public RegionWrapperStub(String regionName) { + this.regionName = regionName; + } + + @Override + public String getTableName() { + return null; // stub value for tests + } + + @Override + public String getRegionName() { + return this.regionName; + } + + @Override + public long getNumStores() { + return 0; // stub value for tests + } + + @Override + public long getNumStoreFiles() { + return 0; // stub value for tests + } + + @Override + public long getMemstoreSize() { + return 0; // stub value for tests + } + + @Override + public long getStoreFileSize() { + return 0; // stub value for tests + } + + @Override + public long getReadRequestCount() { + return 0; // stub value for tests + } + + @Override + public long getWriteRequestCount() { + return 0; // stub value for tests + } + } +} diff --git a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceImpl.java b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationMetricsSourceImpl.java similarity index 69% rename from hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceImpl.java rename to hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationMetricsSourceImpl.java index 411d5beacd1..dd1c3a70f97 100644 --- a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceImpl.java +++ b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationMetricsSourceImpl.java @@ -16,22 +16,24 @@ * limitations under the License.
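The compareTo contract pinned down by testCompareTo above matters because the aggregate source keeps region sources in a TreeSet, which treats compareTo() == 0 as the same element. A small demonstration of that set semantics:

import java.util.TreeSet;

// Two entries that compare equal collapse to one; region sources wrapping
// the same region name behave the same way inside the aggregate's TreeSet.
public class TreeSetDedupSketch {
  public static void main(String[] args) {
    TreeSet<String> names = new TreeSet<String>();
    names.add("TEST");
    names.add("TEST"); // compareTo == 0, so the set is unchanged
    names.add("TWO");
    System.out.println(names.size()); // prints 2
  }
}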
*/ -package org.apache.hadoop.hbase.replication.regionserver.metrics; +package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource; +import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl; import org.junit.Test; import static org.junit.Assert.assertTrue; /** - * Test to make sure that ReplicationMetricsSourceImpl is hooked up to ServiceLoader + * Test to make sure that MetricsReplicationSourceImpl is hooked up to ServiceLoader */ public class TestReplicationMetricsSourceImpl { @Test public void testGetInstance() throws Exception { - ReplicationMetricsSource rms = CompatibilitySingletonFactory - .getInstance(ReplicationMetricsSource.class); - assertTrue(rms instanceof ReplicationMetricsSourceImpl); + MetricsReplicationSource rms = CompatibilitySingletonFactory + .getInstance(MetricsReplicationSource.class); + assertTrue(rms instanceof MetricsReplicationSourceImpl); } } diff --git a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSourceImpl.java b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/rest/TestRESTMetricsSourceImpl.java similarity index 73% rename from hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSourceImpl.java rename to hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/rest/TestRESTMetricsSourceImpl.java index 3f309eba437..30ffd6ea812 100644 --- a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSourceImpl.java +++ b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/rest/TestRESTMetricsSourceImpl.java @@ -16,23 +16,25 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hbase.rest.metrics; +package org.apache.hadoop.hbase.rest; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.rest.MetricsRESTSource; +import org.apache.hadoop.hbase.rest.MetricsRESTSourceImpl; import org.junit.Test; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; /** - * Test for hadoop1's version of RESTMetricsSource + * Test for hadoop1's version of MetricsRESTSource */ public class TestRESTMetricsSourceImpl { @Test public void ensureCompatRegistered() throws Exception { - assertNotNull(CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class)); - assertTrue(CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class) instanceof RESTMetricsSourceImpl); + assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class)); + assertTrue(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class) instanceof MetricsRESTSourceImpl); } } diff --git a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java index 346047c623d..a54a3ee25e1 100644 --- a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java +++ b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.test; -import org.apache.hadoop.hbase.metrics.BaseMetricsSource; -import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl; +import org.apache.hadoop.hbase.metrics.BaseSource; +import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.Metric; import org.apache.hadoop.metrics2.MetricsBuilder; import org.apache.hadoop.metrics2.MetricsRecordBuilder; @@ -110,68 +110,68 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper { } @Override - public void assertTag(String name, String expected, BaseMetricsSource source) { + public void assertTag(String name, String expected, BaseSource source) { getMetrics(source); String cName = canonicalizeMetricName(name); assertEquals("Tags should be equal", expected, tags.get(cName)); } @Override - public void assertGauge(String name, long expected, BaseMetricsSource source) { + public void assertGauge(String name, long expected, BaseSource source) { long found = getGaugeLong(name, source); assertEquals("Metrics Should be equal", (long) Long.valueOf(expected), found); } @Override - public void assertGaugeGt(String name, long expected, BaseMetricsSource source) { + public void assertGaugeGt(String name, long expected, BaseSource source) { double found = getGaugeDouble(name, source); assertTrue(name + " (" + found + ") should be greater than " + expected, found > expected); } @Override - public void assertGaugeLt(String name, long expected, BaseMetricsSource source) { + public void assertGaugeLt(String name, long expected, BaseSource source) { double found = getGaugeDouble(name, source); assertTrue(name + "(" + found + ") should be less than " + expected, found < expected); } @Override - public void assertGauge(String name, double expected, BaseMetricsSource source) { + public void assertGauge(String name, double expected, BaseSource source) { double found = getGaugeDouble(name, source); - assertEquals("Metrics Should be equal", (double) Double.valueOf(expected), found); + assertEquals("Metrics Should be equal", (double) Double.valueOf(expected), found, 
0.01); } @Override - public void assertGaugeGt(String name, double expected, BaseMetricsSource source) { + public void assertGaugeGt(String name, double expected, BaseSource source) { double found = getGaugeDouble(name, source); assertTrue(name + "(" + found + ") should be greater than " + expected, found > expected); } @Override - public void assertGaugeLt(String name, double expected, BaseMetricsSource source) { + public void assertGaugeLt(String name, double expected, BaseSource source) { double found = getGaugeDouble(name, source); assertTrue(name + "(" + found + ") should be less than " + expected, found < expected); } @Override - public void assertCounter(String name, long expected, BaseMetricsSource source) { + public void assertCounter(String name, long expected, BaseSource source) { long found = getCounter(name, source); assertEquals("Metrics Counters should be equal", (long) Long.valueOf(expected), found); } @Override - public void assertCounterGt(String name, long expected, BaseMetricsSource source) { + public void assertCounterGt(String name, long expected, BaseSource source) { long found = getCounter(name, source); assertTrue(name + " (" + found + ") should be greater than " + expected, found > expected); } @Override - public void assertCounterLt(String name, long expected, BaseMetricsSource source) { + public void assertCounterLt(String name, long expected, BaseSource source) { long found = getCounter(name, source); assertTrue(name + "(" + found + ") should be less than " + expected, found < expected); } @Override - public long getCounter(String name, BaseMetricsSource source) { + public long getCounter(String name, BaseSource source) { getMetrics(source); String cName = canonicalizeMetricName(name); assertNotNull(counters.get(cName)); @@ -179,7 +179,7 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper { } @Override - public double getGaugeDouble(String name, BaseMetricsSource source) { + public double getGaugeDouble(String name, BaseSource source) { getMetrics(source); String cName = canonicalizeMetricName(name); assertNotNull(gauges.get(cName)); @@ -187,7 +187,7 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper { } @Override - public long getGaugeLong(String name, BaseMetricsSource source) { + public long getGaugeLong(String name, BaseSource source) { getMetrics(source); String cName = canonicalizeMetricName(name); assertNotNull(gauges.get(cName)); @@ -200,12 +200,12 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper { counters.clear(); } - private void getMetrics(BaseMetricsSource source) { + private void getMetrics(BaseSource source) { reset(); - if (!(source instanceof BaseMetricsSourceImpl)) { + if (!(source instanceof BaseSourceImpl)) { assertTrue(false); } - BaseMetricsSourceImpl impl = (BaseMetricsSourceImpl) source; + BaseSourceImpl impl = (BaseSourceImpl) source; impl.getMetrics(new MockMetricsBuilder(), true); diff --git a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactoryImpl.java b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerMetricsSourceFactoryImpl.java similarity index 67% rename from hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactoryImpl.java rename to hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerMetricsSourceFactoryImpl.java index c7b362fd2ba..c768399b815 100644 --- 
a/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactoryImpl.java +++ b/hbase-hadoop1-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerMetricsSourceFactoryImpl.java @@ -16,9 +16,11 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.thrift.metrics; +package org.apache.hadoop.hbase.thrift; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory; +import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl; import org.junit.Test; import static org.junit.Assert.assertNotNull; @@ -26,28 +28,28 @@ import static org.junit.Assert.assertSame; import static org.junit.Assert.assertTrue; /** - * Test the hadoop 1 version of ThriftServerMetricsSourceFactory + * Test the hadoop 1 version of MetricsThriftServerSourceFactory */ public class TestThriftServerMetricsSourceFactoryImpl { @Test public void testCompatabilityRegistered() throws Exception { - assertNotNull(CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class)); - assertTrue(CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class) instanceof ThriftServerMetricsSourceFactoryImpl); + assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class)); + assertTrue(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class) instanceof MetricsThriftServerSourceFactoryImpl); } @Test public void testCreateThriftOneSource() throws Exception { //Make sure that the factory gives back a singleton. - assertSame(new ThriftServerMetricsSourceFactoryImpl().createThriftOneSource(), - new ThriftServerMetricsSourceFactoryImpl().createThriftOneSource()); + assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftOneSource(), + new MetricsThriftServerSourceFactoryImpl().createThriftOneSource()); } @Test public void testCreateThriftTwoSource() throws Exception { //Make sure that the factory gives back a singleton. - assertSame(new ThriftServerMetricsSourceFactoryImpl().createThriftTwoSource(), - new ThriftServerMetricsSourceFactoryImpl().createThriftTwoSource()); + assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource(), + new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource()); } } diff --git a/hbase-hadoop2-compat/pom.xml b/hbase-hadoop2-compat/pom.xml index df8e764f5f4..754ea1016a1 100644 --- a/hbase-hadoop2-compat/pom.xml +++ b/hbase-hadoop2-compat/pom.xml @@ -138,6 +138,10 @@ limitations under the License. com.yammer.metrics metrics-core + + log4j + log4j + diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java similarity index 63% rename from hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryImpl.java rename to hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java index 4a170462127..350c39d9152 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java @@ -16,22 +16,22 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hbase.master.metrics; +package org.apache.hadoop.hbase.master; /** - * Factory to create MasterMetricsSource when given a MasterMetricsWrapper + * Factory to create MetricsMasterSource when given a MetricsMasterWrapper */ -public class MasterMetricsSourceFactoryImpl implements MasterMetricsSourceFactory { +public class MetricsMasterSourceFactoryImpl implements MetricsMasterSourceFactory { private static enum FactoryStorage { INSTANCE; - MasterMetricsSource source; + MetricsMasterSource masterSource; } @Override - public synchronized MasterMetricsSource create(MasterMetricsWrapper beanWrapper) { - if (FactoryStorage.INSTANCE.source == null ) { - FactoryStorage.INSTANCE.source = new MasterMetricsSourceImpl(beanWrapper); + public synchronized MetricsMasterSource create(MetricsMasterWrapper masterWrapper) { + if (FactoryStorage.INSTANCE.masterSource == null) { + FactoryStorage.INSTANCE.masterSource = new MetricsMasterSourceImpl(masterWrapper); } - return FactoryStorage.INSTANCE.source; + return FactoryStorage.INSTANCE.masterSource; } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java similarity index 79% rename from hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java rename to hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java index 90baeddd3ec..ccc060335dc 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java @@ -16,9 +16,9 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.master.metrics; +package org.apache.hadoop.hbase.master; -import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl; +import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.lib.Interns; @@ -26,39 +26,40 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableGaugeLong; import org.apache.hadoop.metrics2.lib.MutableHistogram; -/** Hadoop2 implementation of MasterMetricsSource. */ -public class MasterMetricsSourceImpl - extends BaseMetricsSourceImpl implements MasterMetricsSource { +/** + * Hadoop2 implementation of MetricsMasterSource. 
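+ * (Instances are normally obtained through MetricsMasterSourceFactory via
+ * CompatibilitySingletonFactory, which is wired up by the ServiceLoader entries
+ * under META-INF/services in this module.)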
+ */ +public class MetricsMasterSourceImpl + extends BaseSourceImpl implements MetricsMasterSource { - - MutableCounterLong clusterRequestsCounter; - MutableGaugeLong ritGauge; - MutableGaugeLong ritCountOverThresholdGauge; - MutableGaugeLong ritOldestAgeGauge; - private final MasterMetricsWrapper masterWrapper; + private final MetricsMasterWrapper masterWrapper; + private MutableCounterLong clusterRequestsCounter; + private MutableGaugeLong ritGauge; + private MutableGaugeLong ritCountOverThresholdGauge; + private MutableGaugeLong ritOldestAgeGauge; private MutableHistogram splitTimeHisto; private MutableHistogram splitSizeHisto; - public MasterMetricsSourceImpl(MasterMetricsWrapper masterMetricsWrapper) { + public MetricsMasterSourceImpl(MetricsMasterWrapper masterWrapper) { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, - masterMetricsWrapper); + masterWrapper); } - public MasterMetricsSourceImpl(String metricsName, + public MetricsMasterSourceImpl(String metricsName, String metricsDescription, String metricsContext, String metricsJmxContext, - MasterMetricsWrapper masterWrapper) { + MetricsMasterWrapper masterWrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.masterWrapper = masterWrapper; } - @Override - public void init() { + @Override + public void init() { super.init(); clusterRequestsCounter = metricsRegistry.newCounter(CLUSTER_REQUESTS_NAME, "", 0l); ritGauge = metricsRegistry.newGauge(RIT_COUNT_NAME, "", 0l); @@ -98,15 +99,15 @@ public class MasterMetricsSourceImpl public void getMetrics(MetricsCollector metricsCollector, boolean all) { MetricsRecordBuilder metricsRecordBuilder = metricsCollector.addRecord(metricsName) - .setContext(metricsContext); + .setContext(metricsContext); // masterWrapper can be null because this function is called inside of init. 
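// Presumably the metrics system polls this source as soon as init() registers it,
// which can happen before the subclass constructor has finished assigning
// masterWrapper; hence the null check that follows.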
if (masterWrapper != null) { metricsRecordBuilder .addGauge(Interns.info(MASTER_ACTIVE_TIME_NAME, - MASTER_ACTIVE_TIME_DESC), masterWrapper.getMasterActiveTime()) + MASTER_ACTIVE_TIME_DESC), masterWrapper.getActiveTime()) .addGauge(Interns.info(MASTER_START_TIME_NAME, - MASTER_START_TIME_DESC), masterWrapper.getMasterStartTime()) + MASTER_START_TIME_DESC), masterWrapper.getStartTime()) .addGauge(Interns.info(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC), masterWrapper.getAverageLoad()) .addGauge(Interns.info(NUM_REGION_SERVERS_NAME, @@ -123,7 +124,7 @@ public class MasterMetricsSourceImpl String.valueOf(masterWrapper.getIsActiveMaster())); } - metricsRegistry.snapshot(metricsRecordBuilder, true); + metricsRegistry.snapshot(metricsRecordBuilder, all); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java similarity index 90% rename from hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java rename to hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java index 7e37089ca8d..20779512a55 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseMetricsSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.metrics; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsSource; +import org.apache.hadoop.metrics2.impl.JmxCacheBuster; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry; import org.apache.hadoop.metrics2.lib.MetricMutableQuantiles; @@ -29,9 +30,9 @@ import org.apache.hadoop.metrics2.lib.MutableHistogram; import org.apache.hadoop.metrics2.source.JvmMetrics; /** - * Hadoop 2 implementation of BaseMetricsSource (using metrics2 framework) + * Hadoop 2 implementation of BaseSource (using metrics2 framework) */ -public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource { +public class BaseSourceImpl implements BaseSource, MetricsSource { private static enum DefaultMetricsSystemInitializer { INSTANCE; @@ -47,15 +48,13 @@ public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource { } } - public static final String HBASE_METRICS_SYSTEM_NAME = "hbase"; - protected final DynamicMetricsRegistry metricsRegistry; protected final String metricsName; protected final String metricsDescription; protected final String metricsContext; protected final String metricsJmxContext; - public BaseMetricsSourceImpl( + public BaseSourceImpl( String metricsName, String metricsDescription, String metricsContext, @@ -141,20 +140,12 @@ public class BaseMetricsSourceImpl implements BaseMetricsSource, MetricsSource { * * @param key */ - public void removeGauge(String key) { + public void removeMetric(String key) { metricsRegistry.removeMetric(key); + JmxCacheBuster.clearJmxCache(); } - /** - * Remove a named counter. 
- * - * @param key - */ - public void removeCounter(String key) { - metricsRegistry.removeMetric(key); - } - - protected DynamicMetricsRegistry getMetricsRegistry() { + public DynamicMetricsRegistry getMetricsRegistry() { return metricsRegistry; } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java new file mode 100644 index 00000000000..8fea559bde7 --- /dev/null +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.metrics.BaseSourceImpl; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; + +import java.util.TreeSet; + +public class MetricsRegionAggregateSourceImpl extends BaseSourceImpl + implements MetricsRegionAggregateSource { + + private final Log LOG = LogFactory.getLog(this.getClass()); + + private final TreeSet regionSources = + new TreeSet(); + + public MetricsRegionAggregateSourceImpl() { + this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); + } + + + public MetricsRegionAggregateSourceImpl(String metricsName, + String metricsDescription, + String metricsContext, + String metricsJmxContext) { + super(metricsName, metricsDescription, metricsContext, metricsJmxContext); + } + + @Override + public void register(MetricsRegionSource source) { + regionSources.add((MetricsRegionSourceImpl) source); + } + + @Override + public void deregister(MetricsRegionSource source) { + regionSources.remove(source); + } + + /** + * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all + * expectations of java programmers. Instead of returning anything Hadoop metrics expects + * getMetrics to push the metrics into the collector. + * + * @param collector the collector + * @param all get all the metrics regardless of when they last changed. 
+ */ + @Override + public void getMetrics(MetricsCollector collector, boolean all) { + + + MetricsRecordBuilder mrb = collector.addRecord(metricsName) + .setContext(metricsContext); + + if (regionSources != null) { + for (MetricsRegionSourceImpl regionMetricSource : regionSources) { + regionMetricSource.snapshot(mrb, all); + } + } + + metricsRegistry.snapshot(mrb, all); + } +} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java new file mode 100644 index 00000000000..dc4ae6abc7c --- /dev/null +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +/** + * Factory to create MetricsRegionServerSource when given a MetricsRegionServerWrapper + */ +public class MetricsRegionServerSourceFactoryImpl implements MetricsRegionServerSourceFactory { + private static enum FactoryStorage { + INSTANCE; + private MetricsRegionServerSource serverSource; + private MetricsRegionAggregateSourceImpl aggImpl; + } + + private synchronized MetricsRegionAggregateSourceImpl getAggregate() { + if (FactoryStorage.INSTANCE.aggImpl == null) { + FactoryStorage.INSTANCE.aggImpl = new MetricsRegionAggregateSourceImpl(); + } + return FactoryStorage.INSTANCE.aggImpl; + } + + + @Override + public synchronized MetricsRegionServerSource createServer(MetricsRegionServerWrapper regionServerWrapper) { + if (FactoryStorage.INSTANCE.serverSource == null) { + FactoryStorage.INSTANCE.serverSource = new MetricsRegionServerSourceImpl( + regionServerWrapper); + } + return FactoryStorage.INSTANCE.serverSource; + } + + @Override + public MetricsRegionSource createRegion(MetricsRegionWrapper wrapper) { + return new MetricsRegionSourceImpl(wrapper, getAggregate()); + } +} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java new file mode 100644 index 00000000000..fe8d0231d1d --- /dev/null +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java @@ -0,0 +1,164 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.MetricHistogram;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.lib.Interns;
+
+/**
+ * Hadoop2 implementation of MetricsRegionServerSource.
+ */
+public class MetricsRegionServerSourceImpl
+    extends BaseSourceImpl implements MetricsRegionServerSource {
+
+  final MetricsRegionServerWrapper rsWrap;
+  private final MetricHistogram putHisto;
+  private final MetricHistogram deleteHisto;
+  private final MetricHistogram getHisto;
+  private final MetricHistogram incrementHisto;
+  private final MetricHistogram appendHisto;
+
+  public MetricsRegionServerSourceImpl(MetricsRegionServerWrapper rsWrap) {
+    this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, rsWrap);
+  }
+
+  public MetricsRegionServerSourceImpl(String metricsName,
+                                       String metricsDescription,
+                                       String metricsContext,
+                                       String metricsJmxContext,
+                                       MetricsRegionServerWrapper rsWrap) {
+    super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+    this.rsWrap = rsWrap;
+
+    putHisto = getMetricsRegistry().getHistogram(PUT_KEY);
+    deleteHisto = getMetricsRegistry().getHistogram(DELETE_KEY);
+    getHisto = getMetricsRegistry().getHistogram(GET_KEY);
+    incrementHisto = getMetricsRegistry().getHistogram(INCREMENT_KEY);
+    appendHisto = getMetricsRegistry().getHistogram(APPEND_KEY);
+  }
+
+  @Override
+  public void init() {
+    super.init();
+  }
+
+  @Override
+  public void updatePut(long t) {
+    putHisto.add(t);
+  }
+
+  @Override
+  public void updateDelete(long t) {
+    deleteHisto.add(t);
+  }
+
+  @Override
+  public void updateGet(long t) {
+    getHisto.add(t);
+  }
+
+  @Override
+  public void updateIncrement(long t) {
+    incrementHisto.add(t);
+  }
+
+  @Override
+  public void updateAppend(long t) {
+    appendHisto.add(t);
+  }
+
+  /**
+   * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all
+   * expectations of java programmers. Instead of returning anything Hadoop metrics expects
+   * getMetrics to push the metrics into the collector.
+   *
+   * @param metricsCollector Collector to accept metrics
+   * @param all push all or only changed?
+   */
+  @Override
+  public void getMetrics(MetricsCollector metricsCollector, boolean all) {
+
+    MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName)
+        .setContext(metricsContext);
+
+    // rsWrap can be null because this function is called inside of init.
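+    // As in MetricsMasterSourceImpl.getMetrics() above, collection can presumably
+    // begin while init() is still registering the source, before rsWrap has been
+    // assigned; the null check below guards that window.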
+ if (rsWrap != null) { + mrb.addGauge(Interns.info(REGION_COUNT, REGION_COUNT_DESC), rsWrap.getNumOnlineRegions()) + .addGauge(Interns.info(STORE_COUNT, STORE_COUNT_DESC), rsWrap.getNumStores()) + .addGauge(Interns.info(STOREFILE_COUNT, STOREFILE_COUNT_DESC), rsWrap.getNumStoreFiles()) + .addGauge(Interns.info(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC), rsWrap.getMemstoreSize()) + .addGauge(Interns.info(STOREFILE_SIZE, STOREFILE_SIZE_DESC), rsWrap.getStoreFileSize()) + .addGauge(Interns.info(RS_START_TIME_NAME, RS_START_TIME_DESC), + rsWrap.getStartCode()) + .addCounter(Interns.info(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC), + rsWrap.getTotalRequestCount()) + .addCounter(Interns.info(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC), + rsWrap.getReadRequestsCount()) + .addCounter(Interns.info(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC), + rsWrap.getWriteRequestsCount()) + .addCounter(Interns.info(CHECK_MUTATE_FAILED_COUNT, CHECK_MUTATE_FAILED_COUNT_DESC), + rsWrap.getCheckAndMutateChecksFailed()) + .addCounter(Interns.info(CHECK_MUTATE_PASSED_COUNT, CHECK_MUTATE_PASSED_COUNT_DESC), + rsWrap.getCheckAndMutateChecksPassed()) + .addGauge(Interns.info(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC), + rsWrap.getStoreFileIndexSize()) + .addGauge(Interns.info(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC), + rsWrap.getTotalStaticIndexSize()) + .addGauge(Interns.info(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC), + rsWrap.getTotalStaticBloomSize()) + .addGauge(Interns.info(NUMBER_OF_PUTS_WITHOUT_WAL, NUMBER_OF_PUTS_WITHOUT_WAL_DESC), + rsWrap.getNumPutsWithoutWAL()) + .addGauge(Interns.info(DATA_SIZE_WITHOUT_WAL, DATA_SIZE_WITHOUT_WAL_DESC), + rsWrap.getDataInMemoryWithoutWAL()) + .addGauge(Interns.info(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC), + rsWrap.getPercentFileLocal()) + .addGauge(Interns.info(COMPACTION_QUEUE_LENGTH, COMPACTION_QUEUE_LENGTH_DESC), + rsWrap.getCompactionQueueSize()) + .addGauge(Interns.info(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC), + rsWrap.getFlushQueueSize()) + .addGauge(Interns.info(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC), + rsWrap.getBlockCacheFreeSize()) + .addGauge(Interns.info(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC), + rsWrap.getBlockCacheCount()) + .addGauge(Interns.info(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC), + rsWrap.getBlockCacheSize()) + .addCounter(Interns.info(BLOCK_CACHE_HIT_COUNT, BLOCK_CACHE_HIT_COUNT_DESC), + rsWrap.getBlockCacheHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_MISS_COUNT, BLOCK_COUNT_MISS_COUNT_DESC), + rsWrap.getBlockCacheMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_EVICTION_COUNT, BLOCK_CACHE_EVICTION_COUNT_DESC), + rsWrap.getBlockCacheEvictedCount()) + .addGauge(Interns.info(BLOCK_CACHE_HIT_PERCENT, BLOCK_CACHE_HIT_PERCENT_DESC), + rsWrap.getBlockCacheHitPercent()) + .addGauge(Interns.info(BLOCK_CACHE_EXPRESS_HIT_PERCENT, + BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC), rsWrap.getBlockCacheHitCachingPercent()) + .addCounter(Interns.info(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC), + rsWrap.getUpdatesBlockedTime()) + .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC), + rsWrap.getZookeeperQuorum()) + .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), rsWrap.getServerName()) + .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), rsWrap.getClusterId()); + } + + metricsRegistry.snapshot(mrb, all); + } +} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java new file mode 100644 index 00000000000..ad9eb277c16 --- /dev/null +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java @@ -0,0 +1,158 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; +import org.apache.hadoop.metrics2.impl.JmxCacheBuster; +import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry; +import org.apache.hadoop.metrics2.lib.Interns; +import org.apache.hadoop.metrics2.lib.MutableCounterLong; + +public class MetricsRegionSourceImpl implements MetricsRegionSource { + + private final MetricsRegionWrapper regionWrapper; + private boolean closed = false; + private MetricsRegionAggregateSourceImpl agg; + private DynamicMetricsRegistry registry; + private static final Log LOG = LogFactory.getLog(MetricsRegionSourceImpl.class); + + private String regionNamePrefix; + private String regionPutKey; + private String regionDeleteKey; + private String regionGetKey; + private String regionIncrementKey; + private String regionAppendKey; + private MutableCounterLong regionPut; + private MutableCounterLong regionDelete; + private MutableCounterLong regionGet; + private MutableCounterLong regionIncrement; + private MutableCounterLong regionAppend; + + + public MetricsRegionSourceImpl(MetricsRegionWrapper regionWrapper, + MetricsRegionAggregateSourceImpl aggregate) { + this.regionWrapper = regionWrapper; + agg = aggregate; + agg.register(this); + + LOG.debug("Creating new MetricsRegionSourceImpl for table " + + regionWrapper.getTableName() + + " " + + regionWrapper.getRegionName()); + + registry = agg.getMetricsRegistry(); + + regionNamePrefix = "table." + regionWrapper.getTableName() + "." + + "region." 
+ regionWrapper.getRegionName() + "."; + + String suffix = "Count"; + + regionPutKey = regionNamePrefix + MetricsRegionServerSource.PUT_KEY + suffix; + regionPut = registry.getLongCounter(regionPutKey, 0l); + + regionDeleteKey = regionNamePrefix + MetricsRegionServerSource.DELETE_KEY + suffix; + regionDelete = registry.getLongCounter(regionDeleteKey, 0l); + + regionGetKey = regionNamePrefix + MetricsRegionServerSource.GET_KEY + suffix; + regionGet = registry.getLongCounter(regionGetKey, 0l); + + regionIncrementKey = regionNamePrefix + MetricsRegionServerSource.INCREMENT_KEY + suffix; + regionIncrement = registry.getLongCounter(regionIncrementKey, 0l); + + regionAppendKey = regionNamePrefix + MetricsRegionServerSource.APPEND_KEY + suffix; + regionAppend = registry.getLongCounter(regionAppendKey, 0l); + } + + @Override + public void close() { + closed = true; + agg.deregister(this); + + LOG.trace("Removing region Metrics: " + regionWrapper.getRegionName()); + registry.removeMetric(regionPutKey); + registry.removeMetric(regionDeleteKey); + registry.removeMetric(regionGetKey); + registry.removeMetric(regionIncrementKey); + + registry.removeMetric(regionAppendKey); + + JmxCacheBuster.clearJmxCache(); + } + + @Override + public void updatePut() { + regionPut.incr(); + } + + @Override + public void updateDelete() { + regionDelete.incr(); + } + + @Override + public void updateGet() { + regionGet.incr(); + } + + @Override + public void updateIncrement() { + regionIncrement.incr(); + } + + @Override + public void updateAppend() { + regionAppend.incr(); + } + + @Override + public MetricsRegionAggregateSource getAggregateSource() { + return agg; + } + + @Override + public int compareTo(MetricsRegionSource source) { + + if (!(source instanceof MetricsRegionSourceImpl)) + return -1; + + MetricsRegionSourceImpl impl = (MetricsRegionSourceImpl) source; + return this.regionWrapper.getRegionName() + .compareTo(impl.regionWrapper.getRegionName()); + } + + void snapshot(MetricsRecordBuilder mrb, boolean ignored) { + if (closed) return; + + mrb.addGauge( + Interns.info(regionNamePrefix + MetricsRegionServerSource.STORE_COUNT, + MetricsRegionServerSource.STORE_COUNT_DESC), + this.regionWrapper.getNumStores()); + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT, + MetricsRegionServerSource.STOREFILE_COUNT_DESC), + this.regionWrapper.getNumStoreFiles()); + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE, + MetricsRegionServerSource.MEMSTORE_SIZE_DESC), + this.regionWrapper.getMemstoreSize()); + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE, + MetricsRegionServerSource.STOREFILE_SIZE_DESC), + this.regionWrapper.getStoreFileSize()); + } +} diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java similarity index 75% rename from hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java rename to hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java index 3f2a40dc32b..594d3271862 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationMetricsSourceImpl.java +++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java @@ -16,23 +16,23 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.replication.regionserver.metrics; +package org.apache.hadoop.hbase.replication.regionserver; -import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl; +import org.apache.hadoop.hbase.metrics.BaseSourceImpl; /** - * Hadoop2 implementation of ReplicationMetricsSource. This provides access to metrics gauges and + * Hadoop2 implementation of MetricsReplicationSource. This provides access to metrics gauges and * counters. */ -public class ReplicationMetricsSourceImpl extends BaseMetricsSourceImpl implements - ReplicationMetricsSource { +public class MetricsReplicationSourceImpl extends BaseSourceImpl implements + MetricsReplicationSource { - public ReplicationMetricsSourceImpl() { + public MetricsReplicationSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - ReplicationMetricsSourceImpl(String metricsName, + MetricsReplicationSourceImpl(String metricsName, String metricsDescription, String metricsContext, String metricsJmxContext) { diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java similarity index 89% rename from hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSourceImpl.java rename to hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java index a104d36c5a4..14e3cfdc906 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetricsSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java @@ -16,16 +16,16 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.rest.metrics; +package org.apache.hadoop.hbase.rest; -import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl; +import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.lib.MutableCounterLong; /** * Hadoop Two implementation of a metrics2 source that will export metrics from the Rest server to * the hadoop metrics2 subsystem. 
*/ -public class RESTMetricsSourceImpl extends BaseMetricsSourceImpl implements RESTMetricsSource { +public class MetricsRESTSourceImpl extends BaseSourceImpl implements MetricsRESTSource { private MutableCounterLong request; private MutableCounterLong sucGet; @@ -35,11 +35,11 @@ public class RESTMetricsSourceImpl extends BaseMetricsSourceImpl implements REST private MutableCounterLong fPut; private MutableCounterLong fDel; - public RESTMetricsSourceImpl() { + public MetricsRESTSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, CONTEXT, JMX_CONTEXT); } - public RESTMetricsSourceImpl(String metricsName, + public MetricsRESTSourceImpl(String metricsName, String metricsDescription, String metricsContext, String metricsJmxContext) { @@ -92,6 +92,6 @@ public class RESTMetricsSourceImpl extends BaseMetricsSourceImpl implements REST @Override public void incrementFailedDeleteRequests(int inc) { - fDel.incr(inc); + fDel.incr(inc); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java similarity index 71% rename from hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactoryImpl.java rename to hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java index 718e4b0ebf7..b6015403745 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java @@ -16,12 +16,12 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.thrift.metrics; +package org.apache.hadoop.hbase.thrift; /** - * Class used to create metrics sources for Thrift and Thrift2 servers. + * Class used to create metrics sources for Thrift and Thrift2 servers. 
*/ -public class ThriftServerMetricsSourceFactoryImpl implements ThriftServerMetricsSourceFactory { +public class MetricsThriftServerSourceFactoryImpl implements MetricsThriftServerSourceFactory { /** * A singleton used to make sure that only one thrift metrics source per server type is ever @@ -29,23 +29,23 @@ public class ThriftServerMetricsSourceFactoryImpl implements ThriftServerMetrics */ private static enum FactoryStorage { INSTANCE; - ThriftServerMetricsSourceImpl thriftOne = new ThriftServerMetricsSourceImpl(METRICS_NAME, + MetricsThriftServerSourceImpl thriftOne = new MetricsThriftServerSourceImpl(METRICS_NAME, METRICS_DESCRIPTION, THRIFT_ONE_METRICS_CONTEXT, THRIFT_ONE_JMX_CONTEXT); - ThriftServerMetricsSourceImpl thriftTwo = new ThriftServerMetricsSourceImpl(METRICS_NAME, + MetricsThriftServerSourceImpl thriftTwo = new MetricsThriftServerSourceImpl(METRICS_NAME, METRICS_DESCRIPTION, THRIFT_TWO_METRICS_CONTEXT, THRIFT_TWO_JMX_CONTEXT); } @Override - public ThriftServerMetricsSource createThriftOneSource() { + public MetricsThriftServerSource createThriftOneSource() { return FactoryStorage.INSTANCE.thriftOne; } @Override - public ThriftServerMetricsSource createThriftTwoSource() { + public MetricsThriftServerSource createThriftTwoSource() { return FactoryStorage.INSTANCE.thriftTwo; } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java similarity index 85% rename from hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceImpl.java rename to hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java index 5c9348fe4af..40a8a6c0e7f 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/metrics/ThriftServerMetricsSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java @@ -16,18 +16,17 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hbase.thrift.metrics; +package org.apache.hadoop.hbase.thrift; -import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl; -import org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSource; +import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.lib.MutableGaugeLong; import org.apache.hadoop.metrics2.lib.MutableStat; /** - * Hadoop 2 version of ThriftServerMetricsSource{@link ThriftServerMetricsSource} + * Hadoop 2 version of MetricsThriftServerSource{@link org.apache.hadoop.hbase.thrift.MetricsThriftServerSource} */ -public class ThriftServerMetricsSourceImpl extends BaseMetricsSourceImpl implements - ThriftServerMetricsSource { +public class MetricsThriftServerSourceImpl extends BaseSourceImpl implements + MetricsThriftServerSource { private MutableStat batchGetStat; private MutableStat batchMutateStat; @@ -38,7 +37,7 @@ public class ThriftServerMetricsSourceImpl extends BaseMetricsSourceImpl impleme private MutableGaugeLong callQueueLenGauge; - public ThriftServerMetricsSourceImpl(String metricsName, + public MetricsThriftServerSourceImpl(String metricsName, String metricsDescription, String metricsContext, String metricsJmxContext) { @@ -50,12 +49,12 @@ public class ThriftServerMetricsSourceImpl extends BaseMetricsSourceImpl impleme super.init(); batchGetStat = getMetricsRegistry().newStat(BATCH_GET_KEY, "", "Keys", "Ops"); batchMutateStat = getMetricsRegistry().newStat(BATCH_MUTATE_KEY, "", "Keys", "Ops"); - queueTimeStat = getMetricsRegistry().newRate(TIME_IN_QUEUE_KEY) ; + queueTimeStat = getMetricsRegistry().newRate(TIME_IN_QUEUE_KEY); thriftCallStat = getMetricsRegistry().newRate(THRIFT_CALL_KEY); thriftSlowCallStat = getMetricsRegistry().newRate(SLOW_THRIFT_CALL_KEY); - callQueueLenGauge = getMetricsRegistry().getLongGauge(CALL_QUEUE_LEN_KEY, 0) ; + callQueueLenGauge = getMetricsRegistry().getLongGauge(CALL_QUEUE_LEN_KEY, 0); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java new file mode 100644 index 00000000000..ce5b9e2eec7 --- /dev/null +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.metrics2.impl; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; + +/** + * JMX caches the beans that have been exported; even after the values are removed from hadoop's + * metrics system the keys and old values will still remain. 
This class stops and restarts the
+ * Hadoop metrics system, forcing JMX to clear the cache of exported metrics.
+ *
+ * This class needs to be in the o.a.h.metrics2.impl namespace as many of the variables/calls used
+ * are package private.
+ */
+public class JmxCacheBuster {
+  private static final Log LOG = LogFactory.getLog(JmxCacheBuster.class);
+
+  /**
+   * Forces JMX to forget about all previously exported metrics.
+   */
+  public static void clearJmxCache() {
+    LOG.trace("Clearing JMX MBean cache.");
+
+    // This is pretty extreme but it's the best way that
+    // I could find to get metrics to be removed.
+    try {
+      if (DefaultMetricsSystem.instance() != null) {
+        DefaultMetricsSystem.instance().stop();
+        DefaultMetricsSystem.instance().start();
+      }
+    } catch (Exception exception) {
+      LOG.debug("Error clearing the JMX cache; it appears the metrics system hasn't been started.",
+          exception);
+    }
+  }
+}
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
index a4238931d81..080bd4d52f0 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
@@ -528,6 +528,7 @@ public class DynamicMetricsRegistry {
     return returnExistingWithCast(metric, metricClass, name);
   }

+  @SuppressWarnings("unchecked")
   private <T extends MutableMetric> T returnExistingWithCast(MutableMetric metric,
       Class<T> metricClass, String name) {
     if (!metricClass.isAssignableFrom(metric.getClass())) {
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java
index 28e92c15251..766cf9609f7 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricMutableQuantiles.java
@@ -22,8 +22,8 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.metrics.MetricHistogram;
-import org.apache.hadoop.metrics.MetricsExecutor;
+import org.apache.hadoop.metrics2.MetricHistogram;
+import org.apache.hadoop.metrics2.MetricsExecutor;
 import org.apache.hadoop.metrics2.MetricsInfo;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.util.MetricQuantile;
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java
index 31357581891..d47912c273c 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.metrics2.lib;

-import org.apache.hadoop.metrics.MetricsExecutor;
+import org.apache.hadoop.metrics2.MetricsExecutor;

 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java index 4fb0be9bfc7..3b012e9001f 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java @@ -22,7 +22,7 @@ import com.yammer.metrics.stats.ExponentiallyDecayingSample; import com.yammer.metrics.stats.Sample; import com.yammer.metrics.stats.Snapshot; import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.metrics.MetricHistogram; +import org.apache.hadoop.metrics2.MetricHistogram; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsRecordBuilder; diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory new file mode 100644 index 00000000000..a5e43e4fcd2 --- /dev/null +++ b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.MetricsMasterSourceFactory @@ -0,0 +1 @@ +org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl \ No newline at end of file diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactory b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactory deleted file mode 100644 index e81c3dcc43f..00000000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactory +++ /dev/null @@ -1 +0,0 @@ -org.apache.hadoop.hbase.master.metrics.MasterMetricsSourceFactoryImpl diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory new file mode 100644 index 00000000000..bc2f6430478 --- /dev/null +++ b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory @@ -0,0 +1 @@ +org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource new file mode 100644 index 00000000000..1e0dd200e6f --- /dev/null +++ b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource @@ -0,0 +1 @@ +org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl \ No newline at end of file diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource deleted file mode 100644 index bb64ad5ba0d..00000000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource +++ /dev/null @@ -1 +0,0 @@ 
-org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSourceImpl \ No newline at end of file diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource new file mode 100644 index 00000000000..5a4a8e9c044 --- /dev/null +++ b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.MetricsRESTSource @@ -0,0 +1 @@ +org.apache.hadoop.hbase.rest.MetricsRESTSourceImpl \ No newline at end of file diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.metrics.RESTMetricsSource b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.metrics.RESTMetricsSource deleted file mode 100644 index 9e7a28d7b9b..00000000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.rest.metrics.RESTMetricsSource +++ /dev/null @@ -1 +0,0 @@ -org.apache.hadoop.hbase.rest.metrics.RESTMetricsSourceImpl \ No newline at end of file diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory new file mode 100644 index 00000000000..2b5c16338cd --- /dev/null +++ b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory @@ -0,0 +1 @@ +org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl \ No newline at end of file diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactory b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactory deleted file mode 100644 index 62d1c6a9325..00000000000 --- a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactory +++ /dev/null @@ -1 +0,0 @@ -org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactoryImpl \ No newline at end of file diff --git a/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor new file mode 100644 index 00000000000..dc120525ba8 --- /dev/null +++ b/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.metrics2.MetricsExecutor @@ -0,0 +1 @@ +org.apache.hadoop.metrics2.lib.MetricsExecutorImpl diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java similarity index 60% rename from hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceImpl.java rename to hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java index fe384d7f405..4cdd60677fe 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java @@ -16,26 +16,29 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hbase.master.metrics; +package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.master.MetricsMasterSource; +import org.apache.hadoop.hbase.master.MetricsMasterSourceFactory; +import org.apache.hadoop.hbase.master.MetricsMasterSourceImpl; import org.junit.Test; import static org.junit.Assert.assertSame; import static org.junit.Assert.assertTrue; /** - * Test for MasterMetricsSourceImpl + * Test for MetricsMasterSourceImpl */ -public class TestMasterMetricsSourceImpl { +public class TestMetricsMasterSourceImpl { @Test public void testGetInstance() throws Exception { - MasterMetricsSourceFactory masterMetricsSourceFactory = CompatibilitySingletonFactory - .getInstance(MasterMetricsSourceFactory.class); - MasterMetricsSource masterMetricsSource = masterMetricsSourceFactory.create(null); - assertTrue(masterMetricsSource instanceof MasterMetricsSourceImpl); - assertSame(masterMetricsSourceFactory, CompatibilitySingletonFactory.getInstance(MasterMetricsSourceFactory.class)); + MetricsMasterSourceFactory metricsMasterSourceFactory = CompatibilitySingletonFactory + .getInstance(MetricsMasterSourceFactory.class); + MetricsMasterSource masterSource = metricsMasterSourceFactory.create(null); + assertTrue(masterSource instanceof MetricsMasterSourceImpl); + assertSame(metricsMasterSourceFactory, CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class)); } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseMetricsSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java similarity index 82% rename from hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseMetricsSourceImpl.java rename to hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java index f3347029e1d..3c9d792409c 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseMetricsSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java @@ -27,15 +27,15 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; /** - * Test of default BaseMetricsSource for hadoop 2 + * Test of default BaseSource for hadoop 2 */ -public class TestBaseMetricsSourceImpl { +public class TestBaseSourceImpl { - private static BaseMetricsSourceImpl bmsi; + private static BaseSourceImpl bmsi; @BeforeClass public static void setUp() throws Exception { - bmsi = new BaseMetricsSourceImpl("TestName", "test description", "testcontext", "TestContext"); + bmsi = new BaseSourceImpl("TestName", "test description", "testcontext", "TestContext"); } @Test @@ -75,16 +75,10 @@ public class TestBaseMetricsSourceImpl { } @Test - public void testRemoveGauge() throws Exception { + public void testRemoveMetric() throws Exception { bmsi.setGauge("testrmgauge", 100); - bmsi.removeGauge("testrmgauge"); + bmsi.removeMetric("testrmgauge"); assertNull(bmsi.metricsRegistry.get("testrmgauge")); } - @Test - public void testRemoveCounter() throws Exception { - bmsi.incCounters("testrmcounter", 100); - bmsi.removeCounter("testrmcounter"); - assertNull(bmsi.metricsRegistry.get("testrmcounter")); - } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java 
b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java new file mode 100644 index 00000000000..e6e16c78c64 --- /dev/null +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.junit.Test; + +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +/** + * Test for MetricsRegionServerSourceImpl + */ +public class TestMetricsRegionServerSourceImpl { + + @Test + public void testGetInstance() throws Exception { + MetricsRegionServerSourceFactory metricsRegionServerSourceFactory = + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); + MetricsRegionServerSource serverSource = + metricsRegionServerSourceFactory.createServer(null); + assertTrue(serverSource instanceof MetricsRegionServerSourceImpl); + assertSame(metricsRegionServerSourceFactory, + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)); + } + + + @Test(expected = RuntimeException.class) + public void testNoGetRegionServerMetricsSourceImpl() throws Exception { + // This should throw an exception because MetricsRegionServerSourceImpl should only + // be created by a factory. + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceImpl.class); + } +} diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java new file mode 100644 index 00000000000..89c0762e6d9 --- /dev/null +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +public class TestMetricsRegionSourceImpl { + + @Test + public void testCompareTo() throws Exception { + MetricsRegionServerSourceFactory fact = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); + + MetricsRegionSource one = fact.createRegion(new RegionWrapperStub("TEST")); + MetricsRegionSource oneClone = fact.createRegion(new RegionWrapperStub("TEST")); + MetricsRegionSource two = fact.createRegion(new RegionWrapperStub("TWO")); + + assertEquals(0, one.compareTo(oneClone)); + + assertTrue(one.compareTo(two) < 0); + assertTrue(two.compareTo(one) > 0); + } + + @Test(expected = RuntimeException.class) + public void testNoGetRegionServerMetricsSourceImpl() throws Exception { + // This should throw an exception because MetricsRegionSourceImpl should only + // be created by a factory. + CompatibilitySingletonFactory.getInstance(MetricsRegionSource.class); + } + + class RegionWrapperStub implements MetricsRegionWrapper { + + private String regionName; + + public RegionWrapperStub(String regionName) { + this.regionName = regionName; + } + + @Override + public String getTableName() { + return null; // stub: not used by these tests + } + + @Override + public String getRegionName() { + return this.regionName; + } + + @Override + public long getNumStores() { + return 0; // stub: not used by these tests + } + + @Override + public long getNumStoreFiles() { + return 0; // stub: not used by these tests + } + + @Override + public long getMemstoreSize() { + return 0; // stub: not used by these tests + } + + @Override + public long getStoreFileSize() { + return 0; // stub: not used by these tests + } + + @Override + public long getReadRequestCount() { + return 0; // stub: not used by these tests + } + + @Override + public long getWriteRequestCount() { + return 0; // stub: not used by these tests + } + } +} diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java similarity index 66% rename from hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceImpl.java rename to hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java index 04248e0c36b..bd7f3dde4e1 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/metrics/TestReplicationMetricsSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java @@ -16,20 +16,22 @@ * limitations under the License.
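The testCompareTo case above pins down the ordering contract for region sources: sources built from equal region names compare as 0, and "TEST" sorts before "TWO". As an illustration only, an ordering keyed on the region name satisfies exactly those assertions; the real MetricsRegionSourceImpl may key on a different field:

```java
// Illustrative sketch, not the patch's actual implementation: ordering
// delegates to String.compareTo on the region name, so equal names yield
// 0 and "TEST" compares less than "TWO".
public class RegionSourceByName implements Comparable<RegionSourceByName> {

  private final String regionName;

  public RegionSourceByName(String regionName) {
    this.regionName = regionName;
  }

  @Override
  public int compareTo(RegionSourceByName other) {
    return this.regionName.compareTo(other.regionName);
  }
}
```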
*/ -package org.apache.hadoop.hbase.replication.regionserver.metrics; +package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource; +import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl; import org.junit.Test; import static org.junit.Assert.assertTrue; -/** Test for ReplicationMetricsSourceImpl */ -public class TestReplicationMetricsSourceImpl { +/** Test for MetricsReplicationSourceImpl */ +public class TestMetricsReplicationSourceImpl { @Test public void testGetInstance() throws Exception { - ReplicationMetricsSource rms = CompatibilitySingletonFactory - .getInstance(ReplicationMetricsSource.class); - assertTrue(rms instanceof ReplicationMetricsSourceImpl); + MetricsReplicationSource rms = CompatibilitySingletonFactory + .getInstance(MetricsReplicationSource.class); + assertTrue(rms instanceof MetricsReplicationSourceImpl); } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java similarity index 70% rename from hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSourceImpl.java rename to hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java index cc9c82d6a81..5f4e70baf1a 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/metrics/TestRESTMetricsSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java @@ -16,23 +16,25 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.rest.metrics; +package org.apache.hadoop.hbase.rest; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.rest.MetricsRESTSource; +import org.apache.hadoop.hbase.rest.MetricsRESTSourceImpl; import org.junit.Test; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; /** - * Test for hadoop 2's version of RESTMetricsSource + * Test for hadoop 2's version of MetricsRESTSource */ -public class TestRESTMetricsSourceImpl { +public class TestMetricsRESTSourceImpl { @Test public void ensureCompatRegistered() throws Exception { - assertNotNull(CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class)); - assertTrue(CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class) instanceof RESTMetricsSourceImpl); + assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class)); + assertTrue(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class) instanceof MetricsRESTSourceImpl); } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java index b8b06ab4504..29c74de9259 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.test; -import org.apache.hadoop.hbase.metrics.BaseMetricsSource; -import org.apache.hadoop.hbase.metrics.BaseMetricsSourceImpl; +import org.apache.hadoop.hbase.metrics.BaseSource; +import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import 
org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsInfo; @@ -129,68 +129,68 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper { } @Override - public void assertTag(String name, String expected, BaseMetricsSource source) { + public void assertTag(String name, String expected, BaseSource source) { getMetrics(source); String cName = canonicalizeMetricName(name); assertEquals("Tags should be equal", expected, tags.get(cName)); } @Override - public void assertGauge(String name, long expected, BaseMetricsSource source) { + public void assertGauge(String name, long expected, BaseSource source) { long found = getGaugeLong(name, source); assertEquals("Metrics Should be equal", (long) Long.valueOf(expected), found); } @Override - public void assertGaugeGt(String name, long expected, BaseMetricsSource source) { + public void assertGaugeGt(String name, long expected, BaseSource source) { double found = getGaugeDouble(name, source); assertTrue(name + " (" + found + ") should be greater than " + expected, found > expected); } @Override - public void assertGaugeLt(String name, long expected, BaseMetricsSource source) { + public void assertGaugeLt(String name, long expected, BaseSource source) { double found = getGaugeDouble(name, source); assertTrue(name + "(" + found + ") should be less than " + expected, found < expected); } @Override - public void assertGauge(String name, double expected, BaseMetricsSource source) { + public void assertGauge(String name, double expected, BaseSource source) { double found = getGaugeDouble(name, source); - assertEquals("Metrics Should be equal", (double) Double.valueOf(expected), found); + assertEquals("Metrics Should be equal", (double) Double.valueOf(expected), found, 0.01); } @Override - public void assertGaugeGt(String name, double expected, BaseMetricsSource source) { + public void assertGaugeGt(String name, double expected, BaseSource source) { double found = getGaugeDouble(name, source); assertTrue(name + "(" + found + ") should be greater than " + expected, found > expected); } @Override - public void assertGaugeLt(String name, double expected, BaseMetricsSource source) { + public void assertGaugeLt(String name, double expected, BaseSource source) { double found = getGaugeDouble(name, source); assertTrue(name + "(" + found + ") should be less than " + expected, found < expected); } @Override - public void assertCounter(String name, long expected, BaseMetricsSource source) { + public void assertCounter(String name, long expected, BaseSource source) { long found = getCounter(name, source); assertEquals("Metrics Counters should be equal", (long) Long.valueOf(expected), found); } @Override - public void assertCounterGt(String name, long expected, BaseMetricsSource source) { + public void assertCounterGt(String name, long expected, BaseSource source) { long found = getCounter(name, source); assertTrue(name + " (" + found + ") should be greater than " + expected, found > expected); } @Override - public void assertCounterLt(String name, long expected, BaseMetricsSource source) { + public void assertCounterLt(String name, long expected, BaseSource source) { long found = getCounter(name, source); assertTrue(name + "(" + found + ") should be less than " + expected, found < expected); } @Override - public long getCounter(String name, BaseMetricsSource source) { + public long getCounter(String name, BaseSource source) { getMetrics(source); String cName = 
canonicalizeMetricName(name); assertNotNull(counters.get(cName)); @@ -198,7 +198,7 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper { } @Override - public double getGaugeDouble(String name, BaseMetricsSource source) { + public double getGaugeDouble(String name, BaseSource source) { getMetrics(source); String cName = canonicalizeMetricName(name); assertNotNull(gauges.get(cName)); @@ -206,7 +206,7 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper { } @Override - public long getGaugeLong(String name, BaseMetricsSource source) { + public long getGaugeLong(String name, BaseSource source) { getMetrics(source); String cName = canonicalizeMetricName(name); assertNotNull(gauges.get(cName)); @@ -220,12 +220,12 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper { counters.clear(); } - private void getMetrics(BaseMetricsSource source) { + private void getMetrics(BaseSource source) { reset(); - if (!(source instanceof BaseMetricsSourceImpl)) { + if (!(source instanceof BaseSourceImpl)) { assertTrue(false); } - BaseMetricsSourceImpl impl = (BaseMetricsSourceImpl) source; + BaseSourceImpl impl = (BaseSourceImpl) source; impl.getMetrics(new MockMetricsBuilder(), true); diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactoryImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java similarity index 64% rename from hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactoryImpl.java rename to hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java index c66c36d038a..c9eda58d9ea 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/metrics/TestThriftServerMetricsSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java @@ -16,9 +16,11 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.thrift.metrics; +package org.apache.hadoop.hbase.thrift; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory; +import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl; import org.junit.Test; import static org.junit.Assert.assertNotNull; @@ -26,28 +28,28 @@ import static org.junit.Assert.assertSame; import static org.junit.Assert.assertTrue; /** - * Test for hadoop 2's version of ThriftServerMetricsSourceFactory + * Test for hadoop 2's version of MetricsThriftServerSourceFactory */ -public class TestThriftServerMetricsSourceFactoryImpl { +public class TestMetricsThriftServerSourceFactoryImpl { @Test public void testCompatabilityRegistered() throws Exception { - assertNotNull(CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class)); - assertTrue(CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class) instanceof ThriftServerMetricsSourceFactoryImpl); + assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class)); + assertTrue(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class) instanceof MetricsThriftServerSourceFactoryImpl); } @Test public void testCreateThriftOneSource() throws Exception { //Make sure that the factory gives back a singleton. 
- assertSame(new ThriftServerMetricsSourceFactoryImpl().createThriftOneSource(), - new ThriftServerMetricsSourceFactoryImpl().createThriftOneSource()); + assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftOneSource(), + new MetricsThriftServerSourceFactoryImpl().createThriftOneSource()); } @Test public void testCreateThriftTwoSource() throws Exception { //Make sure that the factory gives back a singleton. - assertSame(new ThriftServerMetricsSourceFactoryImpl().createThriftTwoSource(), - new ThriftServerMetricsSourceFactoryImpl().createThriftTwoSource()); + assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource(), + new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource()); } } diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon index ee66fdd9231..df20f3cd273 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon @@ -24,7 +24,6 @@ String format = "html"; <%import> java.util.*; org.apache.hadoop.hbase.regionserver.HRegionServer; -org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics; org.apache.hadoop.hbase.util.Bytes; org.apache.hadoop.hbase.HRegionInfo; org.apache.hadoop.hbase.ServerName; @@ -38,7 +37,6 @@ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad; <%java return; %> <%java> - RegionServerMetrics metrics = regionServer.getMetrics(); ServerInfo serverInfo = ProtobufUtil.getServerInfo(regionServer); ServerName serverName = ProtobufUtil.toServerName(serverInfo.getServerName()); List onlineRegions = ProtobufUtil.getOnlineRegions(regionServer); @@ -98,7 +96,7 @@ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
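The testCreateThriftOneSource and testCreateThriftTwoSource cases above assert that two independently constructed factories hand back the same source object. A hedged sketch of the memoization that makes assertSame pass across factory instances (field and class names are assumptions, not the actual MetricsThriftServerSourceFactoryImpl):

```java
public class SharedSourceFactory {

  private static final Object LOCK = new Object();
  private static Object thriftOneSource;

  // Every factory instance returns the same statically held source,
  // which is the property the assertSame checks above rely on.
  public Object createThriftOneSource() {
    synchronized (LOCK) {
      if (thriftOneSource == null) {
        thriftOneSource = new Object(); // the real factory builds a metrics source here
      }
      return thriftOneSource;
    }
  }
}
```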

Server Metrics

- <& ServerMetricsTmpl; metrics = metrics; &> + <& ServerMetricsTmpl; mWrap = regionServer.getMetrics().getRegionServerWrapper(); &> <& ../common/TaskMonitorTmpl; filter = filter &> diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon index ac0fe6f753c..997793984e2 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon @@ -23,7 +23,6 @@ <%import> java.util.*; org.apache.hadoop.hbase.regionserver.HRegionServer; - org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics; org.apache.hadoop.hbase.util.Bytes; org.apache.hadoop.hbase.HRegionInfo; org.apache.hadoop.hbase.ServerName; diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon index 0478c1592c9..0f1c5f43d95 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon @@ -17,12 +17,12 @@ See the License for the specific language governing permissions and limitations under the License. <%args> - RegionServerMetrics metrics; +MetricsRegionServerWrapper mWrap; <%import> java.util.*; org.apache.hadoop.hbase.regionserver.HRegionServer; -org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics; +org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapper; org.apache.hadoop.hbase.util.Bytes; org.apache.hadoop.hbase.HRegionInfo; org.apache.hadoop.hbase.ServerName; @@ -42,36 +42,32 @@ java.lang.management.ManagementFactory;
  • Storefiles
  • Queues
  • Block Cache
  • - Latency
  • - <& baseStats; metrics = metrics; &> + <& baseStats; mWrap = mWrap &>
    - <& memoryStats; metrics = metrics; &> + <& memoryStats; mWrap = mWrap &>
    - <& requestStats; metrics = metrics; &> + <& requestStats; mWrap = mWrap &>
    - <& storeStats; metrics = metrics; &> + <& storeStats; mWrap = mWrap &>
    - <& queueStats; metrics = metrics; &> + <& queueStats; mWrap = mWrap &>
    - <& blockCacheStats; metrics = metrics; &> + <& blockCacheStats; mWrap = mWrap &>
    - <& latencyStats; metrics = metrics; &>
    <%def baseStats> <%args> - RegionServerMetrics metrics; + MetricsRegionServerWrapper mWrap; @@ -82,17 +78,17 @@ java.lang.management.ManagementFactory; - - - - + + + +
    Slow HLog Append Count
    <% metrics.requests.getPreviousIntervalValue() %><% metrics.regions.get() %><% metrics.hdfsBlocksLocalityIndex.get() %><% metrics.slowHLogAppendCount.get() %><% mWrap.getRequestsPerSecond() %><% mWrap.getNumOnlineRegions() %><% mWrap.getPercentFileLocal() %><% 0 %>
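Throughout these hunks the template stops reading raw gauges and instead calls plain getters on a wrapper object, keeping the status page free of metrics-system types. A minimal sketch of that read-only view pattern (the nested interface stands in for MetricsRegionServerWrapper; the renderer class itself is illustrative):

```java
// The page renders from a read-only view: the template calls getters,
// and the region server decides how the numbers are computed.
public class ServerStatsRenderer {

  interface StatsView { // stand-in for MetricsRegionServerWrapper
    double getRequestsPerSecond();
    long getNumOnlineRegions();
    long getPercentFileLocal();
  }

  // Formats one table row the way the baseStats def above does.
  static String baseStatsRow(StatsView view) {
    return String.format("<tr><td>%.1f</td><td>%d</td><td>%d%%</td></tr>",
        view.getRequestsPerSecond(),
        view.getNumOnlineRegions(),
        view.getPercentFileLocal());
  }
}
```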
    <%def memoryStats> <%args> - RegionServerMetrics metrics; +MetricsRegionServerWrapper mWrap; @@ -104,19 +100,19 @@ java.lang.management.ManagementFactory; - +
    - <% ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getUsed() / (1024*1024) %>MB + <% ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getUsed() %> - <% ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax() / (1024*1024) %>MB + <% ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax()%> <% metrics.memstoreSizeMB.get()%>MB<% mWrap.getMemstoreSize() %>
    <%def storeStats> <%args> - RegionServerMetrics metrics; +MetricsRegionServerWrapper mWrap; @@ -128,11 +124,11 @@ java.lang.management.ManagementFactory; - - - - - + + + + +
    Bloom Size
    <% metrics.stores.get() %><% metrics.storefiles.get() %><% metrics.rootIndexSizeKB.get() %>KB<% metrics.totalStaticIndexSizeKB.get() %>KB<% metrics.totalStaticBloomSizeKB.get() %>KB<% mWrap.getNumStores() %><% mWrap.getNumStoreFiles() %><% mWrap.getStoreFileIndexSize() %><% mWrap.getTotalStaticIndexSize() %><% mWrap.getTotalStaticBloomSize() %>
    @@ -140,8 +136,8 @@ java.lang.management.ManagementFactory; <%def requestStats> <%args> - RegionServerMetrics metrics; - +MetricsRegionServerWrapper mWrap; + @@ -149,17 +145,17 @@ java.lang.management.ManagementFactory; - - - + + +
    Request Per SecondWrite Request Count
    <% metrics.requests.getPreviousIntervalValue() %><% metrics.readRequestsCount.get() %><% metrics.writeRequestsCount.get() %>KB<% mWrap.getRequestsPerSecond() %><% mWrap.getReadRequestsCount() %><% mWrap.getWriteRequestsCount() %>
    <%def queueStats> <%args> - RegionServerMetrics metrics; - +MetricsRegionServerWrapper mWrap; + @@ -167,8 +163,8 @@ java.lang.management.ManagementFactory; - - + +
    Compaction queue size
    <% metrics.compactionQueueSize.get() %><% metrics.flushQueueSize.get() %>KB<% mWrap.getCompactionQueueSize() %><% mWrap.getFlushQueueSize() %>
    @@ -176,8 +172,8 @@ java.lang.management.ManagementFactory; <%def blockCacheStats> <%args> - RegionServerMetrics metrics; - +MetricsRegionServerWrapper mWrap; + @@ -190,57 +186,13 @@ java.lang.management.ManagementFactory; - - - - - - - + + + + + + +
    Cache Size
    <% metrics.blockCacheSize.get() / (1024*1024) %>MB<% metrics.blockCacheFree.get() / (1024 * 1024) %>MB<% metrics.blockCacheCount.get()%><% metrics.blockCacheHitCount.get()%><% metrics.blockCacheMissCount.get()%><% metrics.blockCacheHitRatio.get()%>%<% metrics.blockCacheEvictedCount.get()%><% mWrap.getBlockCacheSize()%><% mWrap.getBlockCacheFreeSize()%><% mWrap.getBlockCacheCount() %><% mWrap.getBlockCacheHitCount() %><% mWrap.getBlockCacheMissCount() %><% mWrap.getBlockCacheHitPercent() %>%<% mWrap.getBlockCacheEvictedCount() %>
    - - -<%def latencyStats> -<%args> - RegionServerMetrics metrics; - - - - - - - - - - - - - - - <& histogramRow; op ="FS Read"; histo = metrics.fsReadLatencyHistogram &> - <& histogramRow; op ="FS PRead"; histo = metrics.fsPreadLatencyHistogram &> - <& histogramRow; op ="FS Write"; histo = metrics.fsWriteLatencyHistogram &> - -
    OperationCountMeanMedian75th95th99th99.9th
    - - -<%def histogramRow> -<%args> - String op; - MetricsHistogram histo; - -<%java> - Snapshot s = histo.getSnapshot(); - - - <% op %> - <% histo.getCount()%> - <% String.format("%10.2f", histo.getMean()) %> - <% String.format("%10.2f", s.getMedian()) %> - <% String.format("%10.2f", s.get75thPercentile()) %> - <% String.format("%10.2f", s.get95thPercentile()) %> - <% String.format("%10.2f", s.get99thPercentile()) %> - <% String.format("%10.2f", s.get999thPercentile())%> - - + \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java index 8b6e4dc29ab..26ce6d12b6f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java @@ -30,15 +30,13 @@ import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured; import org.apache.hadoop.io.RawComparator; /** * Common functionality needed by all versions of {@link HFile} readers. */ @InterfaceAudience.Private -public abstract class AbstractHFileReader extends SchemaConfigured - implements HFile.Reader { +public abstract class AbstractHFileReader implements HFile.Reader { /** Filesystem-level block reader for this HFile format version. */ protected HFileBlock.FSReader fsBlockReader; @@ -119,7 +117,6 @@ public abstract class AbstractHFileReader extends SchemaConfigured final long fileSize, final boolean closeIStream, final CacheConfig cacheConf, final HFileSystem hfs) { - super(null, path); this.trailer = trailer; this.compressAlgo = trailer.getCompressionCodec(); this.cacheConf = cacheConf; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java index 1ce4683fb3a..000e11a9b0e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue.KeyComparator; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.io.RawComparator; @@ -44,8 +43,7 @@ import org.apache.hadoop.io.Writable; * Common functionality needed by all versions of {@link HFile} writers. */ @InterfaceAudience.Private -public abstract class AbstractHFileWriter extends SchemaConfigured - implements HFile.Writer { +public abstract class AbstractHFileWriter implements HFile.Writer { /** Key previously appended. Becomes the last key in the file. */ protected byte[] lastKeyBuffer = null; @@ -116,7 +114,6 @@ public abstract class AbstractHFileWriter extends SchemaConfigured Compression.Algorithm compressAlgo, HFileDataBlockEncoder dataBlockEncoder, KeyComparator comparator) { - super(null, path); this.outputStream = outputStream; this.path = path; this.name = path != null ? 
path.getName() : outputStream.toString(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java index c6b12ebc431..b205106b401 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java @@ -23,7 +23,6 @@ import java.nio.ByteBuffer; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.io.HeapSize; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; /** * Cacheable is an interface that allows for an object to be cached. If using an @@ -57,14 +56,4 @@ public interface Cacheable extends HeapSize { */ public CacheableDeserializer getDeserializer(); - /** - * @return the block type of this cached HFile block - */ - public BlockType getBlockType(); - - /** - * @return the metrics object identified by table and column family - */ - public SchemaMetrics getSchemaMetrics(); - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index a642f012f3e..376dc23ce13 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -49,8 +49,6 @@ import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.HbaseMapWritable; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.SchemaAware; import org.apache.hadoop.hbase.util.BloomFilterWriter; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; @@ -284,8 +282,6 @@ public class HFile { /** @return the path to this {@link HFile} */ Path getPath(); - String getColumnFamilyName(); - void appendMetaBlock(String bloomFilterMetaKey, Writable metaWriter); /** @@ -431,7 +427,6 @@ public class HFile { */ public static final WriterFactory getWriterFactory(Configuration conf, CacheConfig cacheConf) { - SchemaMetrics.configureGlobally(conf); int version = getFormatVersion(conf); switch (version) { case 1: @@ -453,8 +448,7 @@ public class HFile { } /** An interface used by clients to open and iterate an {@link HFile}. */ - public interface Reader extends Closeable, CachingBlockReader, - SchemaAware { + public interface Reader extends Closeable, CachingBlockReader { /** * Returns this reader's "name". Usually the last component of the path. 
* Needs to be constant as the file is being moved to support caching on @@ -462,8 +456,6 @@ public class HFile { */ String getName(); - String getColumnFamilyName(); - RawComparator getComparator(); HFileScanner getScanner(boolean cacheBlocks, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index d6b65a1ac26..7a07b980428 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext; import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; import org.apache.hadoop.hbase.regionserver.MemStore; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.ClassSize; @@ -85,7 +84,7 @@ import com.google.common.base.Preconditions; * except that the data section is always uncompressed in the cache. */ @InterfaceAudience.Private -public class HFileBlock extends SchemaConfigured implements Cacheable { +public class HFileBlock implements Cacheable { /** Minor versions starting with this number have hbase checksums */ static final int MINOR_VERSION_WITH_CHECKSUM = 1; @@ -539,8 +538,7 @@ public class HFileBlock extends SchemaConfigured implements Cacheable { @Override public long heapSize() { long size = ClassSize.align( - // Base class size, including object overhead. - SCHEMA_CONFIGURED_UNALIGNED_HEAP_SIZE + + ClassSize.OBJECT + // Block type and byte buffer references 2 * ClassSize.REFERENCE + // On-disk size, uncompressed size, and next block's on-disk size diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java index 7fbc06da71b..c7cf8742e64 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java @@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.HFile.CachingBlockReader; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.CompoundBloomFilterWriter; @@ -719,8 +718,7 @@ public class HFileBlockIndex { * index. However, in most practical cases we will only have leaf-level * blocks and the root index, or just the root index. */ - public static class BlockIndexWriter extends SchemaConfigured - implements InlineBlockWriter { + public static class BlockIndexWriter implements InlineBlockWriter { /** * While the index is being written, this represents the current block * index referencing all leaf blocks, with one exception. 
If the file is @@ -954,7 +952,6 @@ public class HFileBlockIndex { if (blockCache != null) { HFileBlock blockForCaching = blockWriter.getBlockForCaching(); - passSchemaMetricsTo(blockForCaching); blockCache.cacheBlock(new BlockCacheKey(nameForCaching, beginOffset, DataBlockEncoding.NONE, blockForCaching.getBlockType()), blockForCaching); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java index b83ef39a496..eb50a3cc961 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java @@ -243,7 +243,6 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder { includesMemstoreTS, block.getMinorVersion(), block.getBytesPerChecksum(), block.getChecksumType(), block.getOnDiskDataSizeWithHeader()); - block.passSchemaMetricsTo(encodedBlock); return encodedBlock; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java index e1882b380c8..be0fb17f731 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; import org.apache.hadoop.hbase.util.BloomFilter; import org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.hbase.util.ByteBloomFilter; @@ -174,7 +173,6 @@ public class HFilePrettyPrinter { conf.get(org.apache.hadoop.hbase.HConstants.HBASE_DIR)); conf.set("fs.default.name", conf.get(org.apache.hadoop.hbase.HConstants.HBASE_DIR)); - SchemaMetrics.configureGlobally(conf); try { if (!parseOptions(args)) return 1; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java index 56339da36bd..436d0c4f1d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; import org.apache.hadoop.hbase.io.hfile.HFile.Writer; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.RawComparator; @@ -235,8 +234,6 @@ public class HFileReaderV1 extends AbstractHFileReader { cacheConf.shouldCacheBlockOnRead(effectiveCategory)); if (cachedBlock != null) { cacheHits.incrementAndGet(); - getSchemaMetrics().updateOnCacheHit(effectiveCategory, - SchemaMetrics.NO_COMPACTION); return cachedBlock.getBufferWithoutHeader(); } // Cache Miss, please load. 
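With the per-schema counters removed, the readers keep only the global AtomicLong hit accounting visible in the surrounding hunks (cacheHits.incrementAndGet() stays; the SchemaMetrics updates go). A compact sketch of that style of accounting (class and method names are illustrative):

```java
import java.util.concurrent.atomic.AtomicLong;

// Global cache accounting of the kind the readers keep after this patch:
// one thread-safe counter per event, with no per-table/per-family split.
public class CacheStats {

  private final AtomicLong hits = new AtomicLong();
  private final AtomicLong misses = new AtomicLong();

  public void onHit() {
    hits.incrementAndGet();
  }

  public void onMiss() {
    misses.incrementAndGet();
  }

  // Hit ratio in [0, 1]; defined as 0 before any request has been seen.
  public double hitRatio() {
    long h = hits.get();
    long total = h + misses.get();
    return total == 0 ? 0.0 : (double) h / total;
  }
}
```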
@@ -245,13 +242,10 @@ public class HFileReaderV1 extends AbstractHFileReader { HFileBlock hfileBlock = fsBlockReader.readBlockData(offset, nextOffset - offset, metaBlockIndexReader.getRootBlockDataSize(block), true); - passSchemaMetricsTo(hfileBlock); hfileBlock.expectType(BlockType.META); final long delta = System.nanoTime() - startTimeNs; HFile.offerReadLatency(delta, true); - getSchemaMetrics().updateOnCacheMiss(effectiveCategory, - SchemaMetrics.NO_COMPACTION, delta); // Cache the block if (cacheBlock && cacheConf.shouldCacheBlockOnRead(effectiveCategory)) { @@ -300,8 +294,6 @@ public class HFileReaderV1 extends AbstractHFileReader { cacheConf.shouldCacheDataOnRead()); if (cachedBlock != null) { cacheHits.incrementAndGet(); - getSchemaMetrics().updateOnCacheHit( - cachedBlock.getBlockType().getCategory(), isCompaction); return cachedBlock.getBufferWithoutHeader(); } // Carry on, please load. @@ -323,13 +315,10 @@ public class HFileReaderV1 extends AbstractHFileReader { HFileBlock hfileBlock = fsBlockReader.readBlockData(offset, nextOffset - offset, dataBlockIndexReader.getRootBlockDataSize(block), pread); - passSchemaMetricsTo(hfileBlock); hfileBlock.expectType(BlockType.DATA); final long delta = System.nanoTime() - startTimeNs; HFile.offerReadLatency(delta, pread); - getSchemaMetrics().updateOnCacheMiss(BlockCategory.DATA, isCompaction, - delta); // Cache the block if (cacheBlock && cacheConf.shouldCacheBlockOnRead( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java index e252f38a3d7..71e4d09cb6c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java @@ -226,7 +226,6 @@ public class HFileReaderV2 extends AbstractHFileReader { // Return a distinct 'shallow copy' of the block, // so pos does not get messed by the scanner cacheHits.incrementAndGet(); - getSchemaMetrics().updateOnCacheHit(BlockCategory.META, false); return cachedBlock.getBufferWithoutHeader(); } // Cache Miss, please load. 
@@ -234,11 +233,9 @@ public class HFileReaderV2 extends AbstractHFileReader { HFileBlock metaBlock = fsBlockReader.readBlockData(metaBlockOffset, blockSize, -1, true); - passSchemaMetricsTo(metaBlock); final long delta = System.nanoTime() - startTimeNs; HFile.offerReadLatency(delta, true); - getSchemaMetrics().updateOnCacheMiss(BlockCategory.META, false, delta); // Cache the block if (cacheBlock) { @@ -302,7 +299,6 @@ public class HFileReaderV2 extends AbstractHFileReader { cachedBlock.getBlockType().getCategory(); cacheHits.incrementAndGet(); - getSchemaMetrics().updateOnCacheHit(blockCategory, isCompaction); if (cachedBlock.getBlockType() == BlockType.DATA) { HFile.dataBlockReadCnt.incrementAndGet(); @@ -331,12 +327,10 @@ public class HFileReaderV2 extends AbstractHFileReader { hfileBlock = dataBlockEncoder.diskToCacheFormat(hfileBlock, isCompaction); validateBlockType(hfileBlock, expectedBlockType); - passSchemaMetricsTo(hfileBlock); BlockCategory blockCategory = hfileBlock.getBlockType().getCategory(); final long delta = System.nanoTime() - startTimeNs; HFile.offerReadLatency(delta, pread); - getSchemaMetrics().updateOnCacheMiss(blockCategory, isCompaction, delta); // Cache the block if necessary if (cacheBlock && cacheConf.shouldCacheBlockOnRead( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java index 07a58681ec7..d272255bc21 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java @@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; import org.apache.hadoop.hbase.io.hfile.HFile.Writer; import org.apache.hadoop.hbase.regionserver.MemStore; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.BloomFilterWriter; import org.apache.hadoop.hbase.util.Bytes; @@ -109,7 +108,6 @@ public class HFileWriterV1 extends AbstractHFileWriter { final KeyComparator comparator) throws IOException { super(cacheConf, ostream == null ? 
createOutputStream(conf, fs, path) : ostream, path, blockSize, compress, blockEncoder, comparator); - SchemaMetrics.configureGlobally(conf); } /** @@ -158,7 +156,6 @@ public class HFileWriterV1 extends AbstractHFileWriter { HFileBlock.HEADER_SIZE_NO_CHECKSUM); // onDiskDataSizeWithHeader block = blockEncoder.diskToCacheFormat(block, false); - passSchemaMetricsTo(block); cacheConf.getBlockCache().cacheBlock( new BlockCacheKey(name, blockBegin, DataBlockEncoding.NONE, block.getBlockType()), block); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java index 1b05138e119..2d7b0d8a074 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.KeyValue.KeyComparator; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.hfile.HFile.Writer; import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.BloomFilterWriter; import org.apache.hadoop.hbase.util.Bytes; @@ -114,7 +113,6 @@ public class HFileWriterV2 extends AbstractHFileWriter { super(cacheConf, ostream == null ? createOutputStream(conf, fs, path) : ostream, path, blockSize, compressAlgo, blockEncoder, comparator); - SchemaMetrics.configureGlobally(conf); this.checksumType = checksumType; this.bytesPerChecksum = bytesPerChecksum; finishInit(conf); @@ -141,16 +139,6 @@ public class HFileWriterV2 extends AbstractHFileWriter { // Meta data block index writer metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(); LOG.debug("Initialized with " + cacheConf); - - if (isSchemaConfigured()) { - schemaConfigurationChanged(); - } - } - - @Override - protected void schemaConfigurationChanged() { - passSchemaMetricsTo(dataBlockIndexWriter); - passSchemaMetricsTo(metaBlockIndexWriter); } /** @@ -227,7 +215,6 @@ public class HFileWriterV2 extends AbstractHFileWriter { final boolean isCompaction = false; HFileBlock cacheFormatBlock = blockEncoder.diskToCacheFormat( fsBlockWriter.getBlockForCaching(), isCompaction); - passSchemaMetricsTo(cacheFormatBlock); cacheConf.getBlockCache().cacheBlock( new BlockCacheKey(name, offset, blockEncoder.getEncodingInCache(), cacheFormatBlock.getBlockType()), cacheFormatBlock); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 8a6929baf56..74b6212df4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -44,7 +44,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.FSUtils; @@ -326,12 +325,6 @@ public class LruBlockCache implements BlockCache, HeapSize { if (evict) { heapsize *= -1; } - Cacheable cachedBlock = cb.getBuffer(); - SchemaMetrics schemaMetrics = 
cachedBlock.getSchemaMetrics(); - if (schemaMetrics != null) { - schemaMetrics.updateOnCachePutOrEvict( - cachedBlock.getBlockType().getCategory(), heapsize, evict); - } return size.addAndGet(heapsize); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 76da09f53ab..12668e9edb0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -65,7 +65,6 @@ import org.apache.hadoop.hbase.master.handler.DisableTableHandler; import org.apache.hadoop.hbase.master.handler.EnableTableHandler; import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler; import org.apache.hadoop.hbase.master.handler.SplitRegionHandler; -import org.apache.hadoop.hbase.master.metrics.MasterMetrics; import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException; import org.apache.hadoop.hbase.regionserver.RegionOpeningState; import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; @@ -152,7 +151,7 @@ public class AssignmentManager extends ZooKeeperListener { EventType.RS_ZK_REGION_FAILED_OPEN, EventType.RS_ZK_REGION_CLOSED }); // metrics instance to send metrics for RITs - MasterMetrics masterMetrics; + MetricsMaster metricsMaster; private final RegionStates regionStates; @@ -176,7 +175,7 @@ public class AssignmentManager extends ZooKeeperListener { */ public AssignmentManager(Server server, ServerManager serverManager, CatalogTracker catalogTracker, final LoadBalancer balancer, - final ExecutorService service, MasterMetrics metrics) throws KeeperException, IOException { + final ExecutorService service, MetricsMaster metricsMaster) throws KeeperException, IOException { super(server.getZooKeeper()); this.server = server; this.serverManager = serverManager; @@ -200,7 +199,7 @@ public class AssignmentManager extends ZooKeeperListener { int maxThreads = conf.getInt("hbase.assignment.threads.max", 30); this.threadPoolExecutorService = Threads.getBoundedCachedThreadPool( maxThreads, 60L, TimeUnit.SECONDS, Threads.newDaemonThreadFactory("hbase-am")); - this.masterMetrics = metrics;// can be null only with tests. + this.metricsMaster = metricsMaster;// can be null only with tests. 
this.regionStates = new RegionStates(server, serverManager); int workers = conf.getInt("hbase.assignment.zkevent.workers", 5); @@ -2343,10 +2342,10 @@ public class AssignmentManager extends ZooKeeperListener { oldestRITTime = ritTime; } } - if (this.masterMetrics != null) { - this.masterMetrics.updateRITOldestAge(oldestRITTime); - this.masterMetrics.updateRITCount(totalRITs); - this.masterMetrics.updateRITCountOverThreshold(totalRITsOverThreshold); + if (this.metricsMaster != null) { + this.metricsMaster.updateRITOldestAge(oldestRITTime); + this.metricsMaster.updateRITCount(totalRITs); + this.metricsMaster.updateRITCountOverThreshold(totalRITsOverThreshold); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index a28d64a2a61..758dd3013fd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -47,6 +47,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Chore; +import org.apache.hadoop.hbase.ClusterId; import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.DeserializationException; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -96,8 +97,6 @@ import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler; import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler; import org.apache.hadoop.hbase.master.handler.TableEventHandler; import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler; -import org.apache.hadoop.hbase.master.metrics.MasterMetrics; -import org.apache.hadoop.hbase.master.metrics.MasterMetricsWrapperImpl; import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; @@ -247,7 +246,7 @@ Server { private final InetSocketAddress isa; // Metrics for the HMaster - private final MasterMetrics metrics; + private final MetricsMaster metricsMaster; // file system manager for the master FS operations private MasterFileSystem fileSystemManager; @@ -383,7 +382,7 @@ Server { //should we check the compression codec type at master side, default true, HBASE-6370 this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true); - this.metrics = new MasterMetrics( new MasterMetricsWrapperImpl(this)); + this.metricsMaster = new MetricsMaster( new MetricsMasterWrapperImpl(this)); } /** @@ -413,8 +412,8 @@ Server { } - MasterMetrics getMetrics() { - return metrics; + MetricsMaster getMetrics() { + return metricsMaster; } /** @@ -523,7 +522,7 @@ Server { this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this); this.loadBalancerTracker.start(); this.assignmentManager = new AssignmentManager(this, serverManager, - this.catalogTracker, this.balancer, this.executorService, this.metrics); + this.catalogTracker, this.balancer, this.executorService, this.metricsMaster); zooKeeper.registerListenerFirst(assignmentManager); this.regionServerTracker = new RegionServerTracker(zooKeeper, this, @@ -627,7 +626,7 @@ Server { status.setStatus("Initializing Master file system"); this.masterActiveTime = System.currentTimeMillis(); // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring. 
- this.fileSystemManager = new MasterFileSystem(this, this, metrics, masterRecovery); + this.fileSystemManager = new MasterFileSystem(this, this, metricsMaster, masterRecovery); this.tableDescriptors = new FSTableDescriptors(this.fileSystemManager.getFileSystem(), @@ -1182,9 +1181,9 @@ Server { try { HBaseProtos.ServerLoad sl = request.getLoad(); this.serverManager.regionServerReport(ProtobufUtil.toServerName(request.getServer()), new ServerLoad(sl)); - if (sl != null && this.metrics != null) { + if (sl != null && this.metricsMaster != null) { // Up our metrics. - this.metrics.incrementRequests(sl.getTotalNumberOfRequests()); + this.metricsMaster.incrementRequests(sl.getTotalNumberOfRequests()); } } catch (IOException ioe) { throw new ServiceException(ioe); @@ -1834,7 +1833,14 @@ Server { } public String getClusterId() { - return fileSystemManager.getClusterId().toString(); + if (fileSystemManager == null) { + return ""; + } + ClusterId id = fileSystemManager.getClusterId(); + if (id == null) { + return ""; + } + return id.toString(); } /** @@ -2232,7 +2238,15 @@ Server { * @return the average load */ public double getAverageLoad() { - return this.assignmentManager.getRegionStates().getAverageLoad(); + if (this.assignmentManager == null) { + return 0; + } + + RegionStates regionStates = this.assignmentManager.getRegionStates(); + if (regionStates == null) { + return 0; + } + return regionStates.getAverageLoad(); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index 8a1bc465efe..48714137f95 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -44,10 +44,8 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.backup.HFileArchiver; import org.apache.hadoop.hbase.fs.HFileSystem; -import org.apache.hadoop.hbase.master.metrics.MasterMetrics; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.wal.HLog; -import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException; import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter; import org.apache.hadoop.hbase.regionserver.wal.HLogUtil; import org.apache.hadoop.hbase.regionserver.wal.OrphanHLogAfterSplitException; @@ -69,7 +67,7 @@ public class MasterFileSystem { // master status Server master; // metrics for master - MasterMetrics metrics; + MetricsMaster metricsMaster; // Persisted unique cluster ID private ClusterId clusterId; // Keep around for convenience. @@ -87,12 +85,12 @@ public class MasterFileSystem { private final MasterServices services; public MasterFileSystem(Server master, MasterServices services, - MasterMetrics metrics, boolean masterRecovery) + MetricsMaster metricsMaster, boolean masterRecovery) throws IOException { this.conf = master.getConfiguration(); this.master = master; this.services = services; - this.metrics = metrics; + this.metricsMaster = metricsMaster; // Set filesystem to be that of this.rootdir else we get complaints about // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is // default localfs. 
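The guards added to getClusterId() and getAverageLoad() above follow one pattern: metrics collectors poll on their own schedule and can observe the master before initialization finishes, so every hop in the object graph is null-checked and a neutral default is returned instead of letting an NPE escape. A self-contained sketch of the pattern (the nested interfaces are stand-ins for the real HBase types):

```java
public class StartupSafeAccessors {

  // Minimal stand-ins for the real HBase types; assumptions for this sketch.
  interface RegionStates { double getAverageLoad(); }
  interface AssignmentManager { RegionStates getRegionStates(); }

  private final AssignmentManager assignmentManager;

  StartupSafeAccessors(AssignmentManager assignmentManager) {
    this.assignmentManager = assignmentManager;
  }

  // Each link in the chain may still be null during startup, so the
  // accessor degrades to 0 rather than throwing.
  public double getAverageLoad() {
    if (assignmentManager == null) {
      return 0;
    }
    RegionStates states = assignmentManager.getRegionStates();
    return states == null ? 0 : states.getAverageLoad();
  }
}
```

The same shape appears below in MetricsMasterWrapperImpl, where getZookeeperQuorum, getRegionServers, and getDeadRegionServers each check their intermediate object before dereferencing it.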
Presumption is that rootdir is fully-qualified before @@ -317,8 +315,8 @@ public class MasterFileSystem { } } - if (this.metrics != null) { - this.metrics.addSplit(splitTime, splitLogSize); + if (this.metricsMaster != null) { + this.metricsMaster.addSplit(splitTime, splitLogSize); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java similarity index 74% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java index 603d3e93e78..578bca4a242 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java @@ -15,13 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hbase.master.metrics; +package org.apache.hadoop.hbase.master; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.master.MetricsMasterSource; +import org.apache.hadoop.hbase.master.MetricsMasterSourceFactory; +import org.apache.hadoop.hbase.master.MetricsMasterWrapper; /** * This class is for maintaining the various master statistics @@ -32,17 +35,17 @@ import org.apache.hadoop.hbase.CompatibilitySingletonFactory; */ @InterfaceStability.Evolving @InterfaceAudience.Private -public class MasterMetrics { +public class MetricsMaster { private final Log LOG = LogFactory.getLog(this.getClass()); - private MasterMetricsSource masterMetricsSource; + private MetricsMasterSource masterSource; - public MasterMetrics(MasterMetricsWrapper masterWrapper) { - masterMetricsSource = CompatibilitySingletonFactory.getInstance(MasterMetricsSourceFactory.class).create(masterWrapper); + public MetricsMaster(MetricsMasterWrapper masterWrapper) { + masterSource = CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class).create(masterWrapper); } // for unit-test usage - public MasterMetricsSource getMetricsSource() { - return masterMetricsSource; + public MetricsMasterSource getMetricsSource() { + return masterSource; } /** @@ -51,15 +54,15 @@ public class MasterMetrics { * @param size length of original HLogs that were split */ public synchronized void addSplit(long time, long size) { - masterMetricsSource.updateSplitTime(time); - masterMetricsSource.updateSplitSize(size); + masterSource.updateSplitTime(time); + masterSource.updateSplitSize(size); } /** * @param inc How much to add to requests. 
*/ public void incrementRequests(final int inc) { - masterMetricsSource.incRequests(inc); + masterSource.incRequests(inc); } @@ -68,7 +71,7 @@ public class MasterMetrics { * @param ritCount */ public void updateRITCount(int ritCount) { - masterMetricsSource.setRIT(ritCount); + masterSource.setRIT(ritCount); } /** @@ -77,13 +80,13 @@ public class MasterMetrics { * @param ritCountOverThreshold */ public void updateRITCountOverThreshold(int ritCountOverThreshold) { - masterMetricsSource.setRITCountOverThreshold(ritCountOverThreshold); + masterSource.setRITCountOverThreshold(ritCountOverThreshold); } /** * update the timestamp for oldest region in transition metrics. * @param timestamp */ public void updateRITOldestAge(long timestamp) { - masterMetricsSource.setRITOldestAge(timestamp); + masterSource.setRITOldestAge(timestamp); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java similarity index 62% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsWrapperImpl.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java index 3a589869b09..dec2dd0e0d9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetricsWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java @@ -15,18 +15,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hbase.master.metrics; +package org.apache.hadoop.hbase.master; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MetricsMasterWrapper; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; /** * Impl for exposing HMaster Information through JMX */ -public class MasterMetricsWrapperImpl implements MasterMetricsWrapper { +public class MetricsMasterWrapperImpl implements MetricsMasterWrapper { private final HMaster master; - public MasterMetricsWrapperImpl(final HMaster master) { + public MetricsMasterWrapperImpl(final HMaster master) { this.master = master; } @@ -42,7 +45,11 @@ public class MasterMetricsWrapperImpl implements MasterMetricsWrapper { @Override public String getZookeeperQuorum() { - return master.getZooKeeperWatcher().getQuorum(); + ZooKeeperWatcher zk = master.getZooKeeperWatcher(); + if (zk == null) { + return ""; + } + return zk.getQuorum(); } @Override @@ -51,28 +58,40 @@ public class MasterMetricsWrapperImpl implements MasterMetricsWrapper { } @Override - public long getMasterStartTime() { + public long getStartTime() { return master.getMasterStartTime(); } @Override - public long getMasterActiveTime() { + public long getActiveTime() { return master.getMasterActiveTime(); } @Override public int getRegionServers() { - return this.master.getServerManager().getOnlineServers().size(); + ServerManager serverManager = this.master.getServerManager(); + if (serverManager == null) { + return 0; + } + return serverManager.getOnlineServers().size(); } @Override public int getDeadRegionServers() { - return master.getServerManager().getDeadServers().size(); + ServerManager serverManager = this.master.getServerManager(); + if (serverManager == null) { + return 0; + } + return serverManager.getDeadServers().size(); } @Override public String getServerName() { - return 
master.getServerName().getServerName(); + ServerName serverName = master.getServerName(); + if (serverName == null) { + return ""; + } + return serverName.getServerName(); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 0169315d052..cbb444d53b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -81,7 +81,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants.OperationStatusCode; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.NotServingRegionException; @@ -116,11 +115,7 @@ import org.apache.hadoop.hbase.ipc.HBaseServer; import org.apache.hadoop.hbase.ipc.RpcCallContext; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; -import org.apache.hadoop.hbase.regionserver.metrics.OperationMetrics; -import org.apache.hadoop.hbase.regionserver.metrics.RegionMetricsStorage; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogFactory; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; @@ -235,16 +230,21 @@ public class HRegion implements HeapSize { // , Writable{ // private int [] storeSize = null; // private byte [] name = null; - final AtomicLong memstoreSize = new AtomicLong(0); + public final AtomicLong memstoreSize = new AtomicLong(0); // Debug possible data loss due to WAL off - final AtomicLong numPutsWithoutWAL = new AtomicLong(0); - final AtomicLong dataInMemoryWithoutWAL = new AtomicLong(0); + final Counter numPutsWithoutWAL = new Counter(); + final Counter dataInMemoryWithoutWAL = new Counter(); + // Debug why CAS operations are taking a while. final Counter checkAndMutateChecksPassed = new Counter(); final Counter checkAndMutateChecksFailed = new Counter(); + + //Number of requests final Counter readRequestsCount = new Counter(); final Counter writeRequestsCount = new Counter(); + + //How long operations were blocked by a memstore over highwater. 
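// Editorial sketch, not part of this patch: the counters above are
// org.cliffc.high_scale_lib.Counter, swapped in for AtomicLong to reduce contention on
// hot write paths. The API is narrower, roughly:
//
//   Counter c = new Counter();
//   c.increment();        // striped, low-contention +1
//   c.add(putSize);       // bulk add
//   long total = c.get(); // sums the stripes
//
// There is no atomic getAndIncrement(), which is why recordPutWithoutWal() later in this
// file increments first and then tests get() <= 1 to decide whether to log.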
final Counter updatesBlockedMs = new Counter(); /** @@ -362,7 +362,8 @@ public class HRegion implements HeapSize { // , Writable{ public final static String REGIONINFO_FILE = ".regioninfo"; private HTableDescriptor htableDescriptor = null; private RegionSplitPolicy splitPolicy; - private final OperationMetrics opMetrics; + + private final MetricsRegion metricsRegion; /** * Should only be used for testing purposes @@ -386,7 +387,7 @@ public class HRegion implements HeapSize { // , Writable{ this.coprocessorHost = null; this.scannerReadPoints = new ConcurrentHashMap(); - this.opMetrics = new OperationMetrics(); + this.metricsRegion = new MetricsRegion(new MetricsRegionWrapperImpl(this)); } /** @@ -449,7 +450,7 @@ public class HRegion implements HeapSize { // , Writable{ this.regiondir = getRegionDir(this.tableDir, encodedNameStr); this.scannerReadPoints = new ConcurrentHashMap(); - this.opMetrics = new OperationMetrics(conf, this.regionInfo); + this.metricsRegion = new MetricsRegion(new MetricsRegionWrapperImpl(this)); /* * timestamp.slop provides a server-side constraint on the timestamp. This @@ -839,21 +840,20 @@ public class HRegion implements HeapSize { // , Writable{ return this.rsServices; } - /** @return requestsCount for this region */ - public long getRequestsCount() { - return this.readRequestsCount.get() + this.writeRequestsCount.get(); - } - /** @return readRequestsCount for this region */ - public long getReadRequestsCount() { + long getReadRequestsCount() { return this.readRequestsCount.get(); } /** @return writeRequestsCount for this region */ - public long getWriteRequestsCount() { + long getWriteRequestsCount() { return this.writeRequestsCount.get(); } + MetricsRegion getMetrics() { + return metricsRegion; + } + /** @return true if region is closed */ public boolean isClosed() { return this.closed.get(); @@ -1023,7 +1023,7 @@ public class HRegion implements HeapSize { // , Writable{ status.setStatus("Running coprocessor post-close hooks"); this.coprocessorHost.postClose(abort); } - this.opMetrics.closeMetrics(); + this.metricsRegion.close(); status.markComplete("Closed"); LOG.info("Closed " + this); return result; @@ -1723,7 +1723,6 @@ public class HRegion implements HeapSize { // , Writable{ protected RegionScanner getScanner(Scan scan, List additionalScanners) throws IOException { startRegionOperation(); - this.readRequestsCount.increment(); try { // Verify families are all valid prepareScanner(scan); @@ -2322,26 +2321,20 @@ public class HRegion implements HeapSize { // , Writable{ } } - // do after lock - final long netTimeMs = EnvironmentEdgeManager.currentTimeMillis() - startTimeMs; - // See if the column families were consistent through the whole thing. // if they were then keep them. If they were not then pass a null. // null will be treated as unknown. // Total time taken might be involving Puts and Deletes. // Split the time for puts and deletes based on the total number of Puts and Deletes. - long timeTakenForPuts = 0; + if (noOfPuts > 0) { // There were some Puts in the batch. double noOfMutations = noOfPuts + noOfDeletes; - timeTakenForPuts = (long) (netTimeMs * (noOfPuts / noOfMutations)); - final Set keptCfs = putsCfSetConsistent ? putsCfSet : null; - this.opMetrics.updateMultiPutMetrics(keptCfs, timeTakenForPuts); + this.metricsRegion.updatePut(); } if (noOfDeletes > 0) { // There were some Deletes in the batch. - final Set keptCfs = deletesCfSetConsistent ? 
deletesCfSet : null; - this.opMetrics.updateMultiDeleteMetrics(keptCfs, netTimeMs - timeTakenForPuts); + this.metricsRegion.updateDelete(); } if (!success) { for (int i = firstIndex; i < lastIndexExclusive; i++) { @@ -3179,7 +3172,7 @@ public class HRegion implements HeapSize { // , Writable{ /** * See if row is currently locked. - * @param lockid + * @param lockId * @return boolean */ boolean isRowLocked(final Integer lockId) { @@ -4248,7 +4241,6 @@ public class HRegion implements HeapSize { // , Writable{ */ private List get(Get get, boolean withCoprocessor) throws IOException { - long now = EnvironmentEdgeManager.currentTimeMillis(); List results = new ArrayList(); @@ -4264,7 +4256,7 @@ public class HRegion implements HeapSize { // , Writable{ RegionScanner scanner = null; try { scanner = getScanner(scan); - scanner.next(results, SchemaMetrics.METRIC_GETSIZE); + scanner.next(results); } finally { if (scanner != null) scanner.close(); @@ -4276,8 +4268,8 @@ public class HRegion implements HeapSize { // , Writable{ } // do after lock - final long after = EnvironmentEdgeManager.currentTimeMillis(); - this.opMetrics.updateGetMetrics(get.familySet(), after - now); + + this.metricsRegion.updateGet(); return results; } @@ -4324,9 +4316,6 @@ public class HRegion implements HeapSize { // , Writable{ public void processRowsWithLocks(RowProcessor processor, long timeout) throws IOException { - final long startNanoTime = System.nanoTime(); - String metricsName = "rowprocessor." + processor.getName(); - for (byte[] row : processor.getRowsToLock()) { checkRow(row, "processRowsWithLocks"); } @@ -4349,20 +4338,13 @@ public class HRegion implements HeapSize { // , Writable{ processor, now, this, null, null, timeout); processor.postProcess(this, walEdit); } catch (IOException e) { - long endNanoTime = System.nanoTime(); - RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".error.nano", - endNanoTime - startNanoTime); throw e; } finally { closeRegionOperation(); } - final long endNanoTime = System.nanoTime(); - RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".nano", - endNanoTime - startNanoTime); return; } - long lockedNanoTime, processDoneNanoTime, unlockedNanoTime = 0; MultiVersionConsistencyControl.WriteEntry writeEntry = null; boolean locked = false; boolean walSyncSuccessful = false; @@ -4385,7 +4367,6 @@ public class HRegion implements HeapSize { // , Writable{ // 3. Region lock this.updatesLock.readLock().lock(); locked = true; - lockedNanoTime = System.nanoTime(); long now = EnvironmentEdgeManager.currentTimeMillis(); try { @@ -4393,7 +4374,6 @@ public class HRegion implements HeapSize { // , Writable{ // waledits doProcessRowWithTimeout( processor, now, this, mutations, walEdit, timeout); - processDoneNanoTime = System.nanoTime(); if (!mutations.isEmpty()) { // 5. Get a mvcc write number @@ -4418,7 +4398,6 @@ public class HRegion implements HeapSize { // , Writable{ this.updatesLock.readLock().unlock(); locked = false; } - unlockedNanoTime = System.nanoTime(); // 9. Release row lock(s) if (acquiredLocks != null) { @@ -4456,17 +4435,13 @@ public class HRegion implements HeapSize { // , Writable{ releaseRowLock(lid); } } - unlockedNanoTime = unlockedNanoTime == 0 ? - System.nanoTime() : unlockedNanoTime; + } // 12. 
Run post-process hook processor.postProcess(this, walEdit); } catch (IOException e) { - long endNanoTime = System.nanoTime(); - RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".error.nano", - endNanoTime - startNanoTime); throw e; } finally { closeRegionOperation(); @@ -4475,22 +4450,6 @@ public class HRegion implements HeapSize { // , Writable{ requestFlush(); } } - // Populate all metrics - long endNanoTime = System.nanoTime(); - RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".nano", - endNanoTime - startNanoTime); - - RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".acquirelock.nano", - lockedNanoTime - startNanoTime); - - RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".process.nano", - processDoneNanoTime - lockedNanoTime); - - RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".occupylock.nano", - unlockedNanoTime - lockedNanoTime); - - RegionMetricsStorage.incrTimeVaryingMetric(metricsName + ".sync.nano", - endNanoTime - unlockedNanoTime); } private void doProcessRowWithTimeout(final RowProcessor processor, @@ -4567,7 +4526,7 @@ public class HRegion implements HeapSize { // , Writable{ WALEdit walEdits = null; List allKVs = new ArrayList(append.size()); Map> tempMemstore = new HashMap>(); - long before = EnvironmentEdgeManager.currentTimeMillis(); + long size = 0; long txid = 0; @@ -4684,8 +4643,7 @@ public class HRegion implements HeapSize { // , Writable{ closeRegionOperation(); } - long after = EnvironmentEdgeManager.currentTimeMillis(); - this.opMetrics.updateAppendMetrics(append.getFamilyMap().keySet(), after - before); + this.metricsRegion.updateAppend(); if (flush) { @@ -4720,7 +4678,7 @@ public class HRegion implements HeapSize { // , Writable{ WALEdit walEdits = null; List allKVs = new ArrayList(increment.numColumns()); Map> tempMemstore = new HashMap>(); - long before = EnvironmentEdgeManager.currentTimeMillis(); + long size = 0; long txid = 0; @@ -4810,8 +4768,7 @@ public class HRegion implements HeapSize { // , Writable{ } } finally { closeRegionOperation(); - long after = EnvironmentEdgeManager.currentTimeMillis(); - this.opMetrics.updateIncrementMetrics(increment.getFamilyMap().keySet(), after - before); + this.metricsRegion.updateIncrement(); } if (flush) { @@ -5284,7 +5241,8 @@ public class HRegion implements HeapSize { // , Writable{ * This information is exposed by the region server metrics. */ private void recordPutWithoutWal(final Map> familyMap) { - if (numPutsWithoutWAL.getAndIncrement() == 0) { + numPutsWithoutWAL.increment(); + if (numPutsWithoutWAL.get() <= 1) { LOG.info("writing data to region " + this + " with WAL disabled. 
Data may be lost in the event of a crash."); } @@ -5296,7 +5254,7 @@ public class HRegion implements HeapSize { // , Writable{ } } - dataInMemoryWithoutWAL.addAndGet(putSize); + dataInMemoryWithoutWAL.add(putSize); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 06cbcce8ec8..e79ef487e2b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -48,13 +48,11 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.ReentrantReadWriteLock; import javax.management.ObjectName; import com.google.protobuf.Message; -import org.apache.commons.lang.mutable.MutableDouble; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -67,7 +65,6 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.FailedSanityCheckException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; @@ -107,9 +104,7 @@ import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.filter.ByteArrayComparable; import org.apache.hadoop.hbase.fs.HFileSystem; -import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.CacheConfig; -import org.apache.hadoop.hbase.io.hfile.CacheStats; import org.apache.hadoop.hbase.ipc.CoprocessorProtocol; import org.apache.hadoop.hbase.ipc.HBaseRPC; import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler; @@ -191,11 +186,6 @@ import org.apache.hadoop.hbase.regionserver.handler.CloseRootHandler; import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler; import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler; import org.apache.hadoop.hbase.regionserver.handler.OpenRootHandler; -import org.apache.hadoop.hbase.regionserver.metrics.RegionMetricsStorage; -import org.apache.hadoop.hbase.regionserver.metrics.RegionServerDynamicMetrics; -import org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.StoreMetricType; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogUtil; import org.apache.hadoop.hbase.regionserver.wal.HLogFactory; @@ -225,6 +215,7 @@ import org.apache.hadoop.net.DNS; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.apache.zookeeper.KeeperException; +import org.cliffc.high_scale_lib.Counter; import org.codehaus.jackson.map.ObjectMapper; import com.google.common.base.Function; @@ -297,9 +288,8 @@ public class HRegionServer implements ClientProtocol, // Instance of the hbase executor service. protected ExecutorService service; - // Request counter. 
- // Do we need this? Can't we just sum region counters? St.Ack 20110412 - protected AtomicInteger requestCount = new AtomicInteger(); + // Request counter. (Includes requests that are not serviced by regions.) + final Counter requestCount = new Counter(); // If false, the file system has become unavailable protected volatile boolean fsOk; @@ -366,9 +356,7 @@ public class HRegionServer implements ClientProtocol, */ private final LinkedList reservedSpace = new LinkedList(); - private RegionServerMetrics metrics; - - private RegionServerDynamicMetrics dynamicMetrics; + private MetricsRegionServer metricsRegionServer; /* * Check for compaction requests. @@ -403,7 +391,7 @@ public class HRegionServer implements ClientProtocol, private final RegionServerAccounting regionServerAccounting; // Cache configuration and block cache reference - private final CacheConfig cacheConfig; + final CacheConfig cacheConfig; // reference to the Thrift Server. volatile private HRegionThriftServer thriftServer; @@ -446,7 +434,6 @@ public class HRegionServer implements ClientProtocol, */ private final QosFunction qosFunction; - /** * Starts an HRegionServer at the default location * @@ -550,6 +537,10 @@ public class HRegionServer implements ClientProtocol, } } + String getClusterId() { + return this.conf.get(HConstants.CLUSTER_ID); + } + @Retention(RetentionPolicy.RUNTIME) protected @interface QosPriority { int priority() default 0; @@ -858,7 +849,6 @@ public class HRegionServer implements ClientProtocol, break; } } - registerMBean(); // We registered with the Master. Go into run mode. long lastMsg = 0; @@ -893,7 +883,6 @@ public class HRegionServer implements ClientProtocol, } long now = System.currentTimeMillis(); if ((now - lastMsg) >= msgInterval) { - doMetrics(); tryRegionServerReport(lastMsg, now); lastMsg = System.currentTimeMillis(); } @@ -1022,8 +1011,6 @@ public class HRegionServer implements ClientProtocol, void tryRegionServerReport(long reportStartTime, long reportEndTime) throws IOException { HBaseProtos.ServerLoad sl = buildServerLoad(reportStartTime, reportEndTime); - // Why we do this? - this.requestCount.set(0); try { RegionServerReportRequest.Builder request = RegionServerReportRequest.newBuilder(); ServerName sn = ServerName.parseVersionedServerName( @@ -1044,13 +1031,21 @@ public class HRegionServer implements ClientProtocol, } HBaseProtos.ServerLoad buildServerLoad(long reportStartTime, long reportEndTime) { + // We're getting the MetricsRegionServerWrapper here because the wrapper computes requests + // per second, and other metrics. As long as metrics are part of ServerLoad it's best to use + // the wrapper to compute those numbers in one place. + // In the long term, most of these should be moved off of ServerLoad and the heartbeat. + // Instead they should be stored in an HBase table so that external visibility into HBase is + // improved. Additionally, the load balancer will be able to take advantage of a more complete + // history. 
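+ // As a hedged illustration of the pattern (names as used elsewhere in this patch): callers
+ // read precomputed values from the wrapper instead of recomputing them per report, e.g.
+ //   MetricsRegionServerWrapper w = metricsRegionServer.getRegionServerWrapper();
+ //   double rps = w.getRequestsPerSecond();  // refreshed by a background task
+ //   long total = w.getTotalRequestCount();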
+ MetricsRegionServerWrapper regionServerWrapper = this.metricsRegionServer.getRegionServerWrapper(); Collection regions = getOnlineRegionsLocalContext(); MemoryUsage memory = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); HBaseProtos.ServerLoad.Builder serverLoad = HBaseProtos.ServerLoad.newBuilder(); - serverLoad.setNumberOfRequests((int)metrics.getRequests()); - serverLoad.setTotalNumberOfRequests(requestCount.get()); + serverLoad.setNumberOfRequests((int) regionServerWrapper.getRequestsPerSecond()); + serverLoad.setTotalNumberOfRequests((int) regionServerWrapper.getTotalRequestCount()); serverLoad.setUsedHeapMB((int)(memory.getUsed() / 1024 / 1024)); serverLoad.setMaxHeapMB((int) (memory.getMax() / 1024 / 1024)); Set coprocessors = this.hlog.getCoprocessorHost().getCoprocessors(); @@ -1205,8 +1200,7 @@ public class HRegionServer implements ClientProtocol, this.tableDescriptors = new FSTableDescriptors(this.fs, this.rootDir, true); this.hlog = setupWALAndReplication(); // Init in here rather than in constructor after thread name has been set - this.metrics = new RegionServerMetrics(); - this.dynamicMetrics = RegionServerDynamicMetrics.newInstance(); + this.metricsRegionServer = new MetricsRegionServer(new MetricsRegionServerWrapperImpl(this)); startServiceThreads(); LOG.info("Serving as " + this.serverNameFromMasterPOV + ", RPC listening on " + this.isa + @@ -1441,179 +1435,8 @@ public class HRegionServer implements ClientProtocol, return hlogRoller; } - /* - * @param interval Interval since last time metrics were called. - */ - protected void doMetrics() { - try { - metrics(); - } catch (Throwable e) { - LOG.warn("Failed metrics", e); - } - } - - protected void metrics() { - this.metrics.regions.set(this.onlineRegions.size()); - this.metrics.incrementRequests(this.requestCount.get()); - this.metrics.requests.intervalHeartBeat(); - // Is this too expensive every three seconds getting a lock on onlineRegions - // and then per store carried? Can I make metrics be sloppier and avoid - // the synchronizations? - int stores = 0; - int storefiles = 0; - long memstoreSize = 0; - int readRequestsCount = 0; - int writeRequestsCount = 0; - long checkAndMutateChecksFailed = 0; - long checkAndMutateChecksPassed = 0; - long storefileIndexSize = 0; - HDFSBlocksDistribution hdfsBlocksDistribution = - new HDFSBlocksDistribution(); - long totalStaticIndexSize = 0; - long totalStaticBloomSize = 0; - long numPutsWithoutWAL = 0; - long dataInMemoryWithoutWAL = 0; - long updatesBlockedMs = 0; - - // Note that this is a map of Doubles instead of Longs. This is because we - // do effective integer division, which would perhaps truncate more than it - // should because we do it only on one part of our sum at a time. Rather - // than dividing at the end, where it is difficult to know the proper - // factor, everything is exact then truncated. 
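// (Editorial note, not part of this patch: this removed aggregation loop, including the
// map-of-MutableDouble trick described above, is superseded by
// MetricsRegionServerWrapperImpl further down, which recomputes the same aggregates on a
// fixed-delay executor instead of inside this heartbeat-driven metrics() method.)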
- final Map tempVals = - new HashMap(); - - for (Map.Entry e : this.onlineRegions.entrySet()) { - HRegion r = e.getValue(); - memstoreSize += r.memstoreSize.get(); - numPutsWithoutWAL += r.numPutsWithoutWAL.get(); - dataInMemoryWithoutWAL += r.dataInMemoryWithoutWAL.get(); - readRequestsCount += r.readRequestsCount.get(); - writeRequestsCount += r.writeRequestsCount.get(); - checkAndMutateChecksFailed += r.checkAndMutateChecksFailed.get(); - checkAndMutateChecksPassed += r.checkAndMutateChecksPassed.get(); - updatesBlockedMs += r.updatesBlockedMs.get(); - synchronized (r.stores) { - stores += r.stores.size(); - for (Map.Entry ee : r.stores.entrySet()) { - final Store store = ee.getValue(); - final SchemaMetrics schemaMetrics = store.getSchemaMetrics(); - - { - long tmpStorefiles = store.getStorefilesCount(); - schemaMetrics.accumulateStoreMetric(tempVals, - StoreMetricType.STORE_FILE_COUNT, tmpStorefiles); - storefiles += tmpStorefiles; - } - - - { - long tmpStorefileIndexSize = store.getStorefilesIndexSize(); - schemaMetrics.accumulateStoreMetric(tempVals, - StoreMetricType.STORE_FILE_INDEX_SIZE, - (long) (tmpStorefileIndexSize / (1024.0 * 1024))); - storefileIndexSize += tmpStorefileIndexSize; - } - - { - long tmpStorefilesSize = store.getStorefilesSize(); - schemaMetrics.accumulateStoreMetric(tempVals, - StoreMetricType.STORE_FILE_SIZE_MB, - (long) (tmpStorefilesSize / (1024.0 * 1024))); - } - - { - long tmpStaticBloomSize = store.getTotalStaticBloomSize(); - schemaMetrics.accumulateStoreMetric(tempVals, - StoreMetricType.STATIC_BLOOM_SIZE_KB, - (long) (tmpStaticBloomSize / 1024.0)); - totalStaticBloomSize += tmpStaticBloomSize; - } - - { - long tmpStaticIndexSize = store.getTotalStaticIndexSize(); - schemaMetrics.accumulateStoreMetric(tempVals, - StoreMetricType.STATIC_INDEX_SIZE_KB, - (long) (tmpStaticIndexSize / 1024.0)); - totalStaticIndexSize += tmpStaticIndexSize; - } - - schemaMetrics.accumulateStoreMetric(tempVals, - StoreMetricType.MEMSTORE_SIZE_MB, - (long) (store.getMemStoreSize() / (1024.0 * 1024))); - } - } - - hdfsBlocksDistribution.add(r.getHDFSBlocksDistribution()); - } - - for (Entry e : tempVals.entrySet()) { - RegionMetricsStorage.setNumericMetric(e.getKey(), e.getValue().longValue()); - } - - this.metrics.stores.set(stores); - this.metrics.storefiles.set(storefiles); - this.metrics.memstoreSizeMB.set((int) (memstoreSize / (1024 * 1024))); - this.metrics.mbInMemoryWithoutWAL.set((int) (dataInMemoryWithoutWAL / (1024 * 1024))); - this.metrics.numPutsWithoutWAL.set(numPutsWithoutWAL); - this.metrics.storefileIndexSizeMB.set( - (int) (storefileIndexSize / (1024 * 1024))); - this.metrics.rootIndexSizeKB.set( - (int) (storefileIndexSize / 1024)); - this.metrics.totalStaticIndexSizeKB.set( - (int) (totalStaticIndexSize / 1024)); - this.metrics.totalStaticBloomSizeKB.set( - (int) (totalStaticBloomSize / 1024)); - this.metrics.readRequestsCount.set(readRequestsCount); - this.metrics.writeRequestsCount.set(writeRequestsCount); - this.metrics.checkAndMutateChecksFailed.set(checkAndMutateChecksFailed); - this.metrics.checkAndMutateChecksPassed.set(checkAndMutateChecksPassed); - this.metrics.compactionQueueSize.set(compactSplitThread - .getCompactionQueueSize()); - this.metrics.flushQueueSize.set(cacheFlusher - .getFlushQueueSize()); - this.metrics.updatesBlockedSeconds.update(updatesBlockedMs > 0 ? 
- updatesBlockedMs/1000: 0); - final long updatesBlockedMsHigherWater = cacheFlusher.getUpdatesBlockedMsHighWater().get(); - this.metrics.updatesBlockedSecondsHighWater.update(updatesBlockedMsHigherWater > 0 ? - updatesBlockedMsHigherWater/1000: 0); - - BlockCache blockCache = cacheConfig.getBlockCache(); - if (blockCache != null) { - this.metrics.blockCacheCount.set(blockCache.size()); - this.metrics.blockCacheFree.set(blockCache.getFreeSize()); - this.metrics.blockCacheSize.set(blockCache.getCurrentSize()); - CacheStats cacheStats = blockCache.getStats(); - this.metrics.blockCacheHitCount.set(cacheStats.getHitCount()); - this.metrics.blockCacheMissCount.set(cacheStats.getMissCount()); - this.metrics.blockCacheEvictedCount.set(blockCache.getEvictedCount()); - double ratio = blockCache.getStats().getHitRatio(); - int percent = (int) (ratio * 100); - this.metrics.blockCacheHitRatio.set(percent); - ratio = blockCache.getStats().getHitCachingRatio(); - percent = (int) (ratio * 100); - this.metrics.blockCacheHitCachingRatio.set(percent); - // past N period block cache hit / hit caching ratios - cacheStats.rollMetricsPeriod(); - ratio = cacheStats.getHitRatioPastNPeriods(); - percent = (int) (ratio * 100); - this.metrics.blockCacheHitRatioPastNPeriods.set(percent); - ratio = cacheStats.getHitCachingRatioPastNPeriods(); - percent = (int) (ratio * 100); - this.metrics.blockCacheHitCachingRatioPastNPeriods.set(percent); - } - float localityIndex = hdfsBlocksDistribution.getBlockLocalityIndex( - getServerName().getHostname()); - int percent = (int) (localityIndex * 100); - this.metrics.hdfsBlocksLocalityIndex.set(percent); - - } - - /** - * @return Region server metrics instance. - */ - public RegionServerMetrics getMetrics() { - return this.metrics; + public MetricsRegionServer getMetrics() { + return this.metricsRegionServer; } /** @@ -1841,9 +1664,6 @@ public class HRegionServer implements ClientProtocol, // java.util.HashSet's toString() method to print the coprocessor names. 
LOG.fatal("RegionServer abort: loaded coprocessors are: " + CoprocessorHost.getLoadedCoprocessors()); - if (this.metrics != null) { - LOG.info("Dump of metrics: " + this.metrics); - } // Do our best to report our abort to the master, but this may not work try { if (cause != null) { @@ -2146,45 +1966,7 @@ public class HRegionServer implements ClientProtocol, } /** - * @param encodedRegionName - * @return JSON Map of labels to values for passed in encodedRegionName - * @throws IOException - */ - public byte [] getRegionStats(final String encodedRegionName) - throws IOException { - HRegion r = null; - synchronized (this.onlineRegions) { - r = this.onlineRegions.get(encodedRegionName); - } - if (r == null) return null; - ObjectMapper mapper = new ObjectMapper(); - int stores = 0; - int storefiles = 0; - int storefileSizeMB = 0; - int memstoreSizeMB = (int) (r.memstoreSize.get() / 1024 / 1024); - int storefileIndexSizeMB = 0; - synchronized (r.stores) { - stores += r.stores.size(); - for (Store store : r.stores.values()) { - storefiles += store.getStorefilesCount(); - storefileSizeMB += (int) (store.getStorefilesSize() / 1024 / 1024); - storefileIndexSizeMB += (int) (store.getStorefilesIndexSize() / 1024 / 1024); - } - } - Map map = new TreeMap(); - map.put("stores", stores); - map.put("storefiles", storefiles); - map.put("storefileSizeMB", storefileSizeMB); - map.put("storefileIndexSizeMB", storefileIndexSizeMB); - map.put("memstoreSizeMB", memstoreSizeMB); - StringWriter w = new StringWriter(); - mapper.writeValue(w, map); - w.close(); - return Bytes.toBytes(w.toString()); - } - - /** - * For tests and web ui. + * For tests, web ui and metrics. * This method will only work if HRegionServer is in the same JVM as client; * HRegion cannot be serialized to cross an rpc. * @see #getOnlineRegions() @@ -2218,11 +2000,6 @@ public class HRegionServer implements ClientProtocol, return sortedRegions; } - /** @return the request count */ - public AtomicInteger getRequestCount() { - return this.requestCount; - } - /** * @return time stamp in millis of when this region server was started */ @@ -2497,16 +2274,6 @@ public class HRegionServer implements ClientProtocol, new ServerLoad(sl).getRegionServerCoprocessors(); } - /** - * Register bean with platform management server - */ - void registerMBean() { - MXBeanImpl mxBeanInfo = MXBeanImpl.init(this); - mxBean = MBeanUtil.registerMBean("RegionServer", "RegionServer", - mxBeanInfo); - LOG.info("Registered RegionServer MXBean"); - } - /** * Instantiated as a row lock lease. If the lease times out, the row lock is * released @@ -2685,14 +2452,7 @@ public class HRegionServer implements ClientProtocol, if (destination != null){ addToMovedRegions(encodedRegionName, destination); } - - //Clear all of the dynamic metrics as they are now probably useless. - //This is a clear because dynamic metrics could include metrics per cf and - //per hfile. Figuring out which cfs, hfiles, and regions are still relevant to - //this region server would be an onerous task. Instead just clear everything - //and on the next tick of the metrics everything that is still relevant will be - //re-added. 
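// (Editorial note, not part of this patch: clearing the dynamic metrics on region close is
// no longer needed because per-region metrics now live in a MetricsRegion whose source is
// shut down in HRegion's close path via metricsRegion.close(), so stale per-region entries
// cannot linger after a move.)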
- this.dynamicMetrics.clear(); + return toReturn != null; } @@ -2885,8 +2645,9 @@ public class HRegionServer implements ClientProtocol, @Override public GetResponse get(final RpcController controller, final GetRequest request) throws ServiceException { + long before = EnvironmentEdgeManager.currentTimeMillis(); try { - requestCount.incrementAndGet(); + requestCount.increment(); HRegion region = getRegion(request.getRegion()); GetResponse.Builder builder = GetResponse.newBuilder(); ClientProtos.Get get = request.getGet(); @@ -2926,6 +2687,8 @@ public class HRegionServer implements ClientProtocol, return builder.build(); } catch (IOException ie) { throw new ServiceException(ie); + } finally { + metricsRegionServer.updateGet(EnvironmentEdgeManager.currentTimeMillis() - before); } } @@ -2940,7 +2703,7 @@ public class HRegionServer implements ClientProtocol, public MutateResponse mutate(final RpcController controller, final MutateRequest request) throws ServiceException { try { - requestCount.incrementAndGet(); + requestCount.increment(); HRegion region = getRegion(request.getRegion()); MutateResponse.Builder builder = MutateResponse.newBuilder(); Mutate mutate = request.getMutate(); @@ -3073,7 +2836,7 @@ public class HRegionServer implements ClientProtocol, } throw e; } - requestCount.incrementAndGet(); + requestCount.increment(); try { int ttl = 0; @@ -3167,7 +2930,7 @@ public class HRegionServer implements ClientProtocol, for (int i = 0; i < rows && currentScanResultSize < maxResultSize; i++) { // Collect values to be returned here - boolean moreRows = scanner.next(values, SchemaMetrics.METRIC_NEXTSIZE); + boolean moreRows = scanner.next(values); if (!values.isEmpty()) { for (KeyValue kv : values) { currentScanResultSize += kv.heapSize(); @@ -3261,7 +3024,7 @@ public class HRegionServer implements ClientProtocol, throw new DoNotRetryIOException( "lockRow supports only one row now, not " + request.getRowCount() + " rows"); } - requestCount.incrementAndGet(); + requestCount.increment(); HRegion region = getRegion(request.getRegion()); byte[] row = request.getRow(0).toByteArray(); try { @@ -3292,7 +3055,7 @@ public class HRegionServer implements ClientProtocol, public UnlockRowResponse unlockRow(final RpcController controller, final UnlockRowRequest request) throws ServiceException { try { - requestCount.incrementAndGet(); + requestCount.increment(); HRegion region = getRegion(request.getRegion()); if (!request.hasLockId()) { throw new DoNotRetryIOException( @@ -3327,7 +3090,7 @@ public class HRegionServer implements ClientProtocol, public BulkLoadHFileResponse bulkLoadHFile(final RpcController controller, final BulkLoadHFileRequest request) throws ServiceException { try { - requestCount.incrementAndGet(); + requestCount.increment(); HRegion region = getRegion(request.getRegion()); List> familyPaths = new ArrayList>(); for (FamilyPath familyPath: request.getFamilyPathList()) { @@ -3374,7 +3137,7 @@ public class HRegionServer implements ClientProtocol, public ExecCoprocessorResponse execCoprocessor(final RpcController controller, final ExecCoprocessorRequest request) throws ServiceException { try { - requestCount.incrementAndGet(); + requestCount.increment(); HRegion region = getRegion(request.getRegion()); ExecCoprocessorResponse.Builder builder = ExecCoprocessorResponse.newBuilder(); @@ -3392,7 +3155,7 @@ public class HRegionServer implements ClientProtocol, public CoprocessorServiceResponse execService(final RpcController controller, final CoprocessorServiceRequest request) throws 
ServiceException { try { - requestCount.incrementAndGet(); + requestCount.increment(); HRegion region = getRegion(request.getRegion()); // ignore the passed in controller (from the serialized call) ServerRpcController execController = new ServerRpcController(); @@ -3441,7 +3204,7 @@ public class HRegionServer implements ClientProtocol, ActionResult.Builder resultBuilder = null; List mutates = new ArrayList(); for (ClientProtos.MultiAction actionUnion : request.getActionList()) { - requestCount.incrementAndGet(); + requestCount.increment(); try { Object result = null; if (actionUnion.hasGet()) { @@ -3524,7 +3287,7 @@ public class HRegionServer implements ClientProtocol, final GetRegionInfoRequest request) throws ServiceException { try { checkOpen(); - requestCount.incrementAndGet(); + requestCount.increment(); HRegion region = getRegion(request.getRegion()); HRegionInfo info = region.getRegionInfo(); GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder(); @@ -3544,7 +3307,7 @@ public class HRegionServer implements ClientProtocol, final GetStoreFileRequest request) throws ServiceException { try { HRegion region = getRegion(request.getRegion()); - requestCount.incrementAndGet(); + requestCount.increment(); Set columnFamilies = null; if (request.getFamilyCount() == 0) { columnFamilies = region.getStores().keySet(); @@ -3571,7 +3334,7 @@ public class HRegionServer implements ClientProtocol, final GetOnlineRegionRequest request) throws ServiceException { try { checkOpen(); - requestCount.incrementAndGet(); + requestCount.increment(); List list = new ArrayList(onlineRegions.size()); for (HRegion region: this.onlineRegions.values()) { list.add(region.getRegionInfo()); @@ -3602,7 +3365,7 @@ public class HRegionServer implements ClientProtocol, } catch (IOException ie) { throw new ServiceException(ie); } - requestCount.incrementAndGet(); + requestCount.increment(); OpenRegionResponse.Builder builder = OpenRegionResponse.newBuilder(); int regionCount = request.getOpenInfoCount(); Map htds = @@ -3694,7 +3457,6 @@ public class HRegionServer implements ClientProtocol, try { checkOpen(); - requestCount.incrementAndGet(); String encodedRegionName = ProtobufUtil.getRegionEncodedName(request.getRegion()); byte[] encodedName = Bytes.toBytes(encodedRegionName); @@ -3706,6 +3468,7 @@ public class HRegionServer implements ClientProtocol, checkIfRegionInTransition(encodedName, CLOSE); } HRegion region = getRegionByEncodedName(encodedRegionName); + requestCount.increment(); LOG.info("Received close region: " + region.getRegionNameAsString() + ". Version of ZK closing node:" + versionOfClosingNode + ". 
Destination server:" + sn); @@ -3734,7 +3497,7 @@ public class HRegionServer implements ClientProtocol, final FlushRegionRequest request) throws ServiceException { try { checkOpen(); - requestCount.incrementAndGet(); + requestCount.increment(); HRegion region = getRegion(request.getRegion()); LOG.info("Flushing " + region.getRegionNameAsString()); boolean shouldFlush = true; @@ -3765,7 +3528,7 @@ public class HRegionServer implements ClientProtocol, final SplitRegionRequest request) throws ServiceException { try { checkOpen(); - requestCount.incrementAndGet(); + requestCount.increment(); HRegion region = getRegion(request.getRegion()); LOG.info("Splitting " + region.getRegionNameAsString()); region.flushcache(); @@ -3794,7 +3557,7 @@ public class HRegionServer implements ClientProtocol, final CompactRegionRequest request) throws ServiceException { try { checkOpen(); - requestCount.incrementAndGet(); + requestCount.increment(); HRegion region = getRegion(request.getRegion()); LOG.info("Compacting " + region.getRegionNameAsString()); boolean major = false; @@ -3829,7 +3592,7 @@ public class HRegionServer implements ClientProtocol, try { if (replicationSinkHandler != null) { checkOpen(); - requestCount.incrementAndGet(); + requestCount.increment(); HLog.Entry[] entries = ProtobufUtil.toHLogEntries(request.getEntryList()); if (entries != null && entries.length > 0) { replicationSinkHandler.replicateLogEntries(entries); @@ -3852,7 +3615,7 @@ public class HRegionServer implements ClientProtocol, public RollWALWriterResponse rollWALWriter(final RpcController controller, final RollWALWriterRequest request) throws ServiceException { try { - requestCount.incrementAndGet(); + requestCount.increment(); HLog wal = this.getWAL(); byte[][] regionsToFlush = wal.rollWriter(true); RollWALWriterResponse.Builder builder = RollWALWriterResponse.newBuilder(); @@ -3877,7 +3640,7 @@ public class HRegionServer implements ClientProtocol, @Override public StopServerResponse stopServer(final RpcController controller, final StopServerRequest request) throws ServiceException { - requestCount.incrementAndGet(); + requestCount.increment(); String reason = request.getReason(); stop(reason); return StopServerResponse.newBuilder().build(); @@ -3894,7 +3657,7 @@ public class HRegionServer implements ClientProtocol, public GetServerInfoResponse getServerInfo(final RpcController controller, final GetServerInfoRequest request) throws ServiceException { ServerName serverName = getServerName(); - requestCount.incrementAndGet(); + requestCount.increment(); return ResponseConverter.buildGetServerInfoResponse(serverName, webuiport); } @@ -3924,6 +3687,7 @@ public class HRegionServer implements ClientProtocol, */ protected Result append(final HRegion region, final Mutate mutate) throws IOException { + long before = EnvironmentEdgeManager.currentTimeMillis(); Append append = ProtobufUtil.toAppend(mutate); Result r = null; if (region.getCoprocessorHost() != null) { @@ -3936,6 +3700,7 @@ public class HRegionServer implements ClientProtocol, region.getCoprocessorHost().postAppend(append, r); } } + metricsRegionServer.updateAppend(EnvironmentEdgeManager.currentTimeMillis() - before); return r; } @@ -3949,6 +3714,7 @@ public class HRegionServer implements ClientProtocol, */ protected Result increment(final HRegion region, final Mutate mutate) throws IOException { + long before = EnvironmentEdgeManager.currentTimeMillis(); Increment increment = ProtobufUtil.toIncrement(mutate); Result r = null; if (region.getCoprocessorHost() != null) { @@ 
-3961,6 +3727,7 @@ public class HRegionServer implements ClientProtocol, r = region.getCoprocessorHost().postIncrement(increment, r); } } + metricsRegionServer.updateIncrement(EnvironmentEdgeManager.currentTimeMillis() - before); return r; } @@ -3975,7 +3742,8 @@ public class HRegionServer implements ClientProtocol, final HRegion region, final List mutates) { @SuppressWarnings("unchecked") Pair[] mutationsWithLocks = new Pair[mutates.size()]; - + long before = EnvironmentEdgeManager.currentTimeMillis(); + boolean batchContainsPuts = false, batchContainsDelete = false; try { ActionResult.Builder resultBuilder = ActionResult.newBuilder(); NameBytesPair value = ProtobufUtil.toParameter(new Result()); @@ -3987,15 +3755,18 @@ public class HRegionServer implements ClientProtocol, Mutation mutation = null; if (m.getMutateType() == MutateType.PUT) { mutation = ProtobufUtil.toPut(m); + batchContainsPuts = true; } else { mutation = ProtobufUtil.toDelete(m); + batchContainsDelete = true; } Integer lock = getLockFromId(mutation.getLockId()); mutationsWithLocks[i++] = new Pair(mutation, lock); builder.addResult(result); } - requestCount.addAndGet(mutates.size()); + + requestCount.add(mutates.size()); if (!region.getRegionInfo().isMetaTable()) { cacheFlusher.reclaimMemStoreMemory(); } @@ -4031,6 +3802,13 @@ public class HRegionServer implements ClientProtocol, builder.setResult(i, result); } } + long after = EnvironmentEdgeManager.currentTimeMillis(); + if (batchContainsPuts) { + metricsRegionServer.updatePut(after - before); + } + if (batchContainsDelete) { + metricsRegionServer.updateDelete(after - before); + } } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 88288adeeb4..8f7aaf70e02 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -66,8 +66,6 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.regionserver.compactions.CompactSelection; import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.ClassSize; @@ -106,7 +104,7 @@ import com.google.common.collect.Lists; * not be called directly but by an HRegion manager. */ @InterfaceAudience.Private -public class HStore extends SchemaConfigured implements Store { +public class HStore implements Store { static final Log LOG = LogFactory.getLog(HStore.class); protected final MemStore memstore; @@ -174,9 +172,7 @@ public class HStore extends SchemaConfigured implements Store { protected HStore(Path basedir, HRegion region, HColumnDescriptor family, FileSystem fs, Configuration confParam) throws IOException { - super(new CompoundConfiguration().add(confParam).add( - family.getValues()), region.getRegionInfo().getTableNameAsString(), - Bytes.toString(family.getName())); + HRegionInfo info = region.getRegionInfo(); this.fs = fs; // Assemble the store's home directory. 
@@ -260,6 +256,15 @@ public class HStore extends SchemaConfigured implements Store { return ttl; } + public String getColumnFamilyName() { + return this.family.getNameAsString(); + } + + @Override + public String getTableName() { + return this.region.getTableDesc().getNameAsString(); + } + /** * Create this store's homedir * @param fs @@ -414,7 +419,6 @@ public class HStore extends SchemaConfigured implements Store { public StoreFile call() throws IOException { StoreFile storeFile = new StoreFile(fs, p, conf, cacheConf, family.getBloomFilterType(), dataBlockEncoder); - passSchemaMetricsTo(storeFile); storeFile.createReader(); return storeFile; } @@ -573,7 +577,6 @@ public class HStore extends SchemaConfigured implements Store { StoreFile sf = new StoreFile(fs, dstPath, this.conf, this.cacheConf, this.family.getBloomFilterType(), this.dataBlockEncoder); - passSchemaMetricsTo(sf); StoreFile.Reader r = sf.createReader(); this.storeSize += r.length(); @@ -817,19 +820,11 @@ public class HStore extends SchemaConfigured implements Store { status.setStatus("Flushing " + this + ": reopening flushed file"); StoreFile sf = new StoreFile(this.fs, dstPath, this.conf, this.cacheConf, this.family.getBloomFilterType(), this.dataBlockEncoder); - passSchemaMetricsTo(sf); StoreFile.Reader r = sf.createReader(); this.storeSize += r.length(); this.totalUncompressedBytes += r.getTotalUncompressedBytes(); - // This increments the metrics associated with total flushed bytes for this - // family. The overall flush count is stored in the static metrics and - // retrieved from HRegion.recentFlushes, which is set within - // HRegion.internalFlushcache, which indirectly calls this to actually do - // the flushing through the StoreFlusherImpl class - getSchemaMetrics().updatePersistentStoreMetric( - SchemaMetrics.StoreMetricType.FLUSH_SIZE, flushedSize.longValue()); if (LOG.isInfoEnabled()) { LOG.info("Added " + sf + ", entries=" + r.getEntries() + ", sequenceid=" + logCacheFlushId + @@ -875,11 +870,6 @@ public class HStore extends SchemaConfigured implements Store { .withBytesPerChecksum(bytesPerChecksum) .withCompression(compression) .build(); - // The store file writer's path does not include the CF name, so we need - // to configure the HFile writer directly. - SchemaConfigured sc = (SchemaConfigured) w.writer; - SchemaConfigured.resetSchemaMetricsConf(sc); - passSchemaMetricsTo(sc); return w; } @@ -1409,8 +1399,8 @@ public class HStore extends SchemaConfigured implements Store { (forcemajor || isMajorCompaction(compactSelection.getFilesToCompact())) && (compactSelection.getFilesToCompact().size() < this.maxFilesToCompact ); - LOG.debug(this.getHRegionInfo().getEncodedName() + " - " + - this.getColumnFamilyName() + ": Initiating " + + LOG.debug(this.getHRegionInfo().getEncodedName() + " - " + + this.getColumnFamilyName() + ": Initiating " + (majorcompaction ? 
"major" : "minor") + "compaction"); if (!majorcompaction && @@ -1523,7 +1513,6 @@ public class HStore extends SchemaConfigured implements Store { storeFile = new StoreFile(this.fs, path, this.conf, this.cacheConf, this.family.getBloomFilterType(), NoOpDataBlockEncoder.INSTANCE); - passSchemaMetricsTo(storeFile); storeFile.createReader(); } catch (IOException e) { LOG.error("Failed to open store file : " + path @@ -1575,7 +1564,6 @@ public class HStore extends SchemaConfigured implements Store { } result = new StoreFile(this.fs, destPath, this.conf, this.cacheConf, this.family.getBloomFilterType(), this.dataBlockEncoder); - passSchemaMetricsTo(result); result.createReader(); } try { @@ -1936,7 +1924,7 @@ public class HStore extends SchemaConfigured implements Store { @Override public String toString() { - return getColumnFamilyName(); + return this.getColumnFamilyName(); } @Override @@ -2125,9 +2113,8 @@ public class HStore extends SchemaConfigured implements Store { } public static final long FIXED_OVERHEAD = - ClassSize.align(SchemaConfigured.SCHEMA_CONFIGURED_UNALIGNED_HEAP_SIZE + - + (17 * ClassSize.REFERENCE) + (6 * Bytes.SIZEOF_LONG) - + (5 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_BOOLEAN); + ClassSize.align((19 * ClassSize.REFERENCE) + (6 * Bytes.SIZEOF_LONG) + + (5 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_BOOLEAN); public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD + ClassSize.OBJECT + ClassSize.REENTRANT_LOCK diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index 9300fc74fe4..2fcf4cd5c70 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -416,7 +416,6 @@ class MemStoreFlusher extends HasThread implements FlushRequester { server.compactSplitThread.requestCompaction(region, getName()); } - server.getMetrics().addFlush(region.getRecentFlushInfo()); } catch (DroppedSnapshotException ex) { // Cache flush can fail in a few places. If it fails in a critical // section, we get a DroppedSnapshotException and a replay of hlog diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegion.java new file mode 100644 index 00000000000..70795305cb8 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegion.java @@ -0,0 +1,66 @@ +/* + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.CompatibilityFactory; + + +/** + * This is the glue between the HRegion and whatever hadoop shim layer + * is loaded (hbase-hadoop1-compat or hbase-hadoop2-compat). + */ +@InterfaceAudience.Private +public class MetricsRegion { + + private MetricsRegionSource source; + + public MetricsRegion(MetricsRegionWrapper wrapper) { + source = CompatibilityFactory.getInstance(MetricsRegionServerSourceFactory.class) + .createRegion(wrapper); + } + + public void close() { + source.close(); + } + + public void updatePut() { + source.updatePut(); + } + + public void updateDelete() { + source.updateDelete(); + } + + public void updateGet() { + source.updateGet(); + } + + public void updateAppend() { + source.updateAppend(); + } + + public void updateIncrement() { + source.updateIncrement(); + } + + MetricsRegionSource getSource() { + return source; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java new file mode 100644 index 00000000000..3c84213e3c8 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.CompatibilitySingletonFactory; + +/** + * This class is for maintaining the various regionserver statistics + * and publishing them through the metrics interfaces. + *

    + * This class has a number of metrics variables that are publicly accessible; + * these variables (objects) have methods to update their values. + */ +@InterfaceStability.Evolving +@InterfaceAudience.Private +public class MetricsRegionServer { + private final Log LOG = LogFactory.getLog(this.getClass()); + private MetricsRegionServerSource serverSource; + private MetricsRegionServerWrapper regionServerWrapper; + + public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper) { + this.regionServerWrapper = regionServerWrapper; + serverSource = + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class) + .createServer(regionServerWrapper); + } + + // for unit-test usage + public MetricsRegionServerSource getMetricsSource() { + return serverSource; + } + + public MetricsRegionServerWrapper getRegionServerWrapper() { + return regionServerWrapper; + } + + public void updatePut(long t){ + serverSource.updatePut(t); + } + + public void updateDelete(long t){ + serverSource.updateDelete(t); + } + + public void updateGet(long t){ + serverSource.updateGet(t); + } + + public void updateIncrement(long t){ + serverSource.updateIncrement(t); + } + + public void updateAppend(long t){ + serverSource.updateAppend(t); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java new file mode 100644 index 00000000000..22f7af7e2be --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java @@ -0,0 +1,395 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.HDFSBlocksDistribution; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.io.hfile.BlockCache; +import org.apache.hadoop.hbase.io.hfile.CacheStats; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.metrics2.MetricsExecutor; + +import java.util.Collection; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +/** + * Impl for exposing HRegionServer Information through Hadoop's metrics 2 system. 
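+ * <p>
+ * Rather than walking every region on each metrics pull, this wrapper refreshes its counters
+ * from a task scheduled every {@code PERIOD} seconds and serves the cached values out of
+ * volatile fields. A rough read-side sketch (illustrative only; {@code regionServer} is
+ * assumed to be in scope):
+ * <pre>
+ *   MetricsRegionServerWrapperImpl wrapper = new MetricsRegionServerWrapperImpl(regionServer);
+ *   wrapper.forceRecompute();  // run the refresh task now rather than waiting out the period
+ *   long storeFiles = wrapper.getNumStoreFiles();
+ * </pre>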
+ */
+@InterfaceAudience.Private
+class MetricsRegionServerWrapperImpl
+    implements MetricsRegionServerWrapper {
+
+  public static final Log LOG = LogFactory.getLog(MetricsRegionServerWrapperImpl.class);
+
+  public static final int PERIOD = 15;
+
+  private final HRegionServer regionServer;
+  private final BlockCache blockCache;
+
+  private volatile long numStores = 0;
+  private volatile long numStoreFiles = 0;
+  private volatile long memstoreSize = 0;
+  private volatile long storeFileSize = 0;
+  private volatile double requestsPerSecond = 0.0;
+  private volatile long readRequestsCount = 0;
+  private volatile long writeRequestsCount = 0;
+  private volatile long checkAndMutateChecksFailed = 0;
+  private volatile long checkAndMutateChecksPassed = 0;
+  private volatile long storefileIndexSize = 0;
+  private volatile long totalStaticIndexSize = 0;
+  private volatile long totalStaticBloomSize = 0;
+  private volatile long numPutsWithoutWAL = 0;
+  private volatile long dataInMemoryWithoutWAL = 0;
+  private volatile int percentFileLocal = 0;
+
+  private CacheStats cacheStats;
+  private ScheduledExecutorService executor;
+  private Runnable runnable;
+
+  public MetricsRegionServerWrapperImpl(final HRegionServer regionServer) {
+    this.regionServer = regionServer;
+    this.blockCache = this.regionServer.cacheConfig.getBlockCache();
+    this.cacheStats = blockCache.getStats();
+    this.executor = CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor();
+    this.runnable = new RegionServerMetricsWrapperRunnable();
+    this.executor.scheduleWithFixedDelay(this.runnable, PERIOD, PERIOD, TimeUnit.SECONDS);
+  }
+
+  @Override
+  public String getClusterId() {
+    return regionServer.getClusterId();
+  }
+
+  @Override
+  public long getStartCode() {
+    return regionServer.getStartcode();
+  }
+
+  @Override
+  public String getZookeeperQuorum() {
+    ZooKeeperWatcher zk = regionServer.getZooKeeperWatcher();
+    if (zk == null) {
+      return "";
+    }
+    return zk.getQuorum();
+  }
+
+  @Override
+  public String getCoprocessors() {
+    String[] coprocessors = regionServer.getCoprocessors();
+    if (coprocessors == null || coprocessors.length == 0) {
+      return "";
+    }
+    return StringUtils.join(coprocessors, ", ");
+  }
+
+  @Override
+  public String getServerName() {
+    ServerName serverName = regionServer.getServerName();
+    if (serverName == null) {
+      return "";
+    }
+    return serverName.getServerName();
+  }
+
+  @Override
+  public long getNumOnlineRegions() {
+    Collection<HRegion> onlineRegionsLocalContext = regionServer.getOnlineRegionsLocalContext();
+    if (onlineRegionsLocalContext == null) {
+      return 0;
+    }
+    return onlineRegionsLocalContext.size();
+  }
+
+  @Override
+  public long getTotalRequestCount() {
+    return regionServer.requestCount.get();
+  }
+
+  @Override
+  public int getCompactionQueueSize() {
+    //The thread could be null. If so assume there is no queue.
+    if (this.regionServer.compactSplitThread == null) {
+      return 0;
+    }
+    return this.regionServer.compactSplitThread.getCompactionQueueSize();
+  }
+
+  @Override
+  public int getFlushQueueSize() {
+    //If there is no flusher there should be no queue.
+    if (this.regionServer.cacheFlusher == null) {
+      return 0;
+    }
+    return this.regionServer.cacheFlusher.getFlushQueueSize();
+  }
+
+  @Override
+  public long getBlockCacheCount() {
+    if (this.blockCache == null) {
+      return 0;
+    }
+    return this.blockCache.size();
+  }
+
+  @Override
+  public long getBlockCacheSize() {
+    if (this.blockCache == null) {
+      return 0;
+    }
+    return this.blockCache.getCurrentSize();
+  }
+
+  @Override
+  public long getBlockCacheFreeSize() {
+    if (this.blockCache == null) {
+      return 0;
+    }
+    return this.blockCache.getFreeSize();
+  }
+
+  @Override
+  public long getBlockCacheHitCount() {
+    if (this.cacheStats == null) {
+      return 0;
+    }
+    return this.cacheStats.getHitCount();
+  }
+
+  @Override
+  public long getBlockCacheMissCount() {
+    if (this.cacheStats == null) {
+      return 0;
+    }
+    return this.cacheStats.getMissCount();
+  }
+
+  @Override
+  public long getBlockCacheEvictedCount() {
+    if (this.cacheStats == null) {
+      return 0;
+    }
+    return this.cacheStats.getEvictedCount();
+  }
+
+  @Override
+  public int getBlockCacheHitPercent() {
+    if (this.cacheStats == null) {
+      return 0;
+    }
+    return (int) (this.cacheStats.getHitRatio() * 100);
+  }
+
+  @Override
+  public int getBlockCacheHitCachingPercent() {
+    if (this.cacheStats == null) {
+      return 0;
+    }
+    return (int) (this.cacheStats.getHitCachingRatio() * 100);
+  }
+
+  @Override public void forceRecompute() {
+    this.runnable.run();
+  }
+
+  @Override
+  public long getNumStores() {
+    return numStores;
+  }
+
+  @Override
+  public long getNumStoreFiles() {
+    return numStoreFiles;
+  }
+
+  @Override
+  public long getMemstoreSize() {
+    return memstoreSize;
+  }
+
+  @Override
+  public long getStoreFileSize() {
+    return storeFileSize;
+  }
+
+  @Override public double getRequestsPerSecond() {
+    return requestsPerSecond;
+  }
+
+  @Override
+  public long getReadRequestsCount() {
+    return readRequestsCount;
+  }
+
+  @Override
+  public long getWriteRequestsCount() {
+    return writeRequestsCount;
+  }
+
+  @Override
+  public long getCheckAndMutateChecksFailed() {
+    return checkAndMutateChecksFailed;
+  }
+
+  @Override
+  public long getCheckAndMutateChecksPassed() {
+    return checkAndMutateChecksPassed;
+  }
+
+  @Override
+  public long getStoreFileIndexSize() {
+    return storefileIndexSize;
+  }
+
+  @Override
+  public long getTotalStaticIndexSize() {
+    return totalStaticIndexSize;
+  }
+
+  @Override
+  public long getTotalStaticBloomSize() {
+    return totalStaticBloomSize;
+  }
+
+  @Override
+  public long getNumPutsWithoutWAL() {
+    return numPutsWithoutWAL;
+  }
+
+  @Override
+  public long getDataInMemoryWithoutWAL() {
+    return dataInMemoryWithoutWAL;
+  }
+
+  @Override
+  public int getPercentFileLocal() {
+    return percentFileLocal;
+  }
+
+  @Override
+  public long getUpdatesBlockedTime() {
+    if (this.regionServer.cacheFlusher == null) {
+      return 0;
+    }
+    return this.regionServer.cacheFlusher.getUpdatesBlockedMsHighWater().get();
+  }
+
+
+  /**
+   * This is the runnable that will be executed on the executor every PERIOD seconds. It will
+   * take metrics/numbers from all of the regions and use them to compute point-in-time
+   * metrics.
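+   * <p>
+   * Requests per second, for example, falls out of the change in the total request count over
+   * the elapsed wall-clock time, roughly:
+   * <pre>
+   *   requestsPerSecond = (currentRequestCount - lastRequestCount)
+   *       / ((currentTime - lastRan) / 1000.0);
+   * </pre>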
+ */ + public class RegionServerMetricsWrapperRunnable implements Runnable { + + private long lastRan = 0; + private long lastRequestCount = 0; + + @Override + synchronized public void run() { + + cacheStats = blockCache.getStats(); + + HDFSBlocksDistribution hdfsBlocksDistribution = + new HDFSBlocksDistribution(); + + long tempNumStores = 0; + long tempNumStoreFiles = 0; + long tempMemstoreSize = 0; + long tempStoreFileSize = 0; + long tempReadRequestsCount = 0; + long tempWriteRequestsCount = 0; + long tempCheckAndMutateChecksFailed = 0; + long tempCheckAndMutateChecksPassed = 0; + long tempStorefileIndexSize = 0; + long tempTotalStaticIndexSize = 0; + long tempTotalStaticBloomSize = 0; + long tempNumPutsWithoutWAL = 0; + long tempDataInMemoryWithoutWAL = 0; + int tempPercentFileLocal = 0; + + + for (HRegion r : regionServer.getOnlineRegionsLocalContext()) { + tempNumPutsWithoutWAL += r.numPutsWithoutWAL.get(); + tempDataInMemoryWithoutWAL += r.dataInMemoryWithoutWAL.get(); + tempReadRequestsCount += r.readRequestsCount.get(); + tempWriteRequestsCount += r.writeRequestsCount.get(); + tempCheckAndMutateChecksFailed += r.checkAndMutateChecksFailed.get(); + tempCheckAndMutateChecksPassed += r.checkAndMutateChecksPassed.get(); + tempNumStores += r.stores.size(); + for (Store store : r.stores.values()) { + tempNumStoreFiles += store.getStorefilesCount(); + tempMemstoreSize += store.getMemStoreSize(); + tempStoreFileSize += store.getStorefilesSize(); + tempStorefileIndexSize += store.getStorefilesIndexSize(); + tempTotalStaticBloomSize += store.getTotalStaticBloomSize(); + tempTotalStaticIndexSize += store.getTotalStaticIndexSize(); + } + + hdfsBlocksDistribution.add(r.getHDFSBlocksDistribution()); + } + + float localityIndex = hdfsBlocksDistribution.getBlockLocalityIndex( + regionServer.getServerName().getHostname()); + tempPercentFileLocal = (int) (localityIndex * 100); + + + //Compute the number of requests per second + long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + + // assume that it took PERIOD seconds to start the executor. + // this is a guess but it's a pretty good one. + if (lastRan == 0) { + lastRan = currentTime - (PERIOD*1000); + } + + + //If we've time traveled keep the last requests per second. + if ((currentTime - lastRan) > 10) { + long currentRequestCount = getTotalRequestCount(); + requestsPerSecond = (currentRequestCount - lastRequestCount) / ((currentTime - lastRan) / 1000.0); + lastRequestCount = currentRequestCount; + } + lastRan = currentTime; + + //Copy over computed values so that no thread sees half computed values. 
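+      // (Each field written below is volatile, so a single assignment publishes a fully
+      // summed value to reader threads without extra locking; a reader can observe the
+      // previous run's numbers, but never a half-computed total.)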
+ numStores = tempNumStores; + numStoreFiles = tempNumStoreFiles; + memstoreSize = tempMemstoreSize; + storeFileSize = tempStoreFileSize; + readRequestsCount = tempReadRequestsCount; + writeRequestsCount = tempWriteRequestsCount; + checkAndMutateChecksFailed = tempCheckAndMutateChecksFailed; + checkAndMutateChecksPassed = tempCheckAndMutateChecksPassed; + storefileIndexSize = tempStorefileIndexSize; + totalStaticIndexSize = tempTotalStaticIndexSize; + totalStaticBloomSize = tempTotalStaticBloomSize; + numPutsWithoutWAL = tempNumPutsWithoutWAL; + dataInMemoryWithoutWAL = tempDataInMemoryWithoutWAL; + percentFileLocal = tempPercentFileLocal; + } + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java new file mode 100644 index 00000000000..64d570dcac4 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java @@ -0,0 +1,105 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.metrics2.MetricsExecutor; + +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +public class MetricsRegionWrapperImpl implements MetricsRegionWrapper { + + public static final int PERIOD = 45; + + private final HRegion region; + private ScheduledExecutorService executor; + private Runnable runnable; + private long numStoreFiles; + private long memstoreSize; + private long storeFileSize; + + public MetricsRegionWrapperImpl(HRegion region) { + this.region = region; + this.executor = CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor(); + this.runnable = new HRegionMetricsWrapperRunnable(); + this.executor.scheduleWithFixedDelay(this.runnable, PERIOD, PERIOD, TimeUnit.SECONDS); + } + + @Override + public String getTableName() { + return this.region.getTableDesc().getNameAsString(); + } + + @Override + public String getRegionName() { + return this.region.getRegionInfo().getEncodedName(); + } + + @Override + public long getNumStores() { + return this.region.stores.size(); + } + + @Override + public long getNumStoreFiles() { + return numStoreFiles; + } + + @Override + public long getMemstoreSize() { + return memstoreSize; + } + + @Override + public long getStoreFileSize() { + return storeFileSize; + } + + @Override + public long getReadRequestCount() { + return this.region.getReadRequestsCount(); + } + + @Override + public long getWriteRequestCount() { + return this.region.getWriteRequestsCount(); + } + + public class HRegionMetricsWrapperRunnable implements Runnable { + + @Override + public void run() { + long tempNumStoreFiles = 0; + long tempMemstoreSize = 0; + long tempStoreFileSize = 0; + + for (Store store : region.stores.values()) { + tempNumStoreFiles += store.getStorefilesCount(); + tempMemstoreSize += store.getMemStoreSize(); + tempStoreFileSize += store.getStorefilesSize(); + } + + numStoreFiles = tempNumStoreFiles; + memstoreSize = tempMemstoreSize; + storeFileSize = tempStoreFileSize; + } + } + +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java index f945ffd277f..0b5a3162339 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java @@ -66,7 +66,6 @@ class SplitRequest implements Runnable { if (!st.prepare()) return; try { st.execute(this.server, this.server); - this.server.getMetrics().incrementSplitSuccessCount(System.currentTimeMillis() - startTime); } catch (Exception e) { if (this.server.isStopping() || this.server.isStopped()) { LOG.info( @@ -81,7 +80,6 @@ class SplitRequest implements Runnable { if (st.rollback(this.server, this.server)) { LOG.info("Successful rollback of failed split of " + parent.getRegionNameAsString()); - this.server.getMetrics().incrementSplitFailureCount(); } else { this.server.abort("Abort; we got an error after point-of-no-return"); } @@ -102,7 +100,6 @@ class SplitRequest implements Runnable { } catch (IOException ex) { LOG.error("Split failed " + this, RemoteExceptionHandler .checkIOException(ex)); - this.server.getMetrics().incrementSplitFailureCount(); server.checkFileSystem(); } finally { if (this.parent.getCoprocessorHost() != null) { diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index d391a16df74..1841eebf323 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder; import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.SchemaAware; import com.google.common.collect.ImmutableList; @@ -42,7 +41,7 @@ import com.google.common.collect.ImmutableList; */ @InterfaceAudience.Private @InterfaceStability.Evolving -public interface Store extends SchemaAware, HeapSize { +public interface Store extends HeapSize { /* The default priority for user-specified compaction requests. * The user gets top priority unless we have blocking compactions. (Pri <= 0) @@ -287,4 +286,8 @@ public interface Store extends SchemaAware, HeapSize { * @return the parent region hosting this store */ public HRegion getHRegion(); + + public String getColumnFamilyName(); + + public String getTableName(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index 203aecf50d6..a240f94c9ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -58,8 +58,6 @@ import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.io.hfile.HFileWriterV1; import org.apache.hadoop.hbase.io.hfile.HFileWriterV2; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured; import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder; import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder; import org.apache.hadoop.hbase.util.ChecksumType; @@ -80,7 +78,7 @@ import com.google.common.collect.Ordering; /** * A Store data file. Stores usually have one or more of these files. They * are produced by flushing the memstore to disk. To - * create, instantiate a writer using {@link StoreFile#WriterBuilder} + * create, instantiate a writer using {@link StoreFile.WriterBuilder} * and append data. Be sure to add any metadata before calling close on the * Writer (Use the appendMetadata convenience methods). On close, a StoreFile * is sitting in the Filesystem. To refer to it, create a StoreFile instance @@ -91,7 +89,7 @@ import com.google.common.collect.Ordering; * writer and a reader is that we write once but read a lot more. 
*/ @InterfaceAudience.LimitedPrivate("Coprocessor") -public class StoreFile extends SchemaConfigured { +public class StoreFile { static final Log LOG = LogFactory.getLog(StoreFile.class.getName()); public static enum BloomType { @@ -277,7 +275,6 @@ public class StoreFile extends SchemaConfigured { this.modificationTimeStamp = 0; } - SchemaMetrics.configureGlobally(conf); } /** @@ -545,11 +542,6 @@ public class StoreFile extends SchemaConfigured { dataBlockEncoder.getEncodingInCache()); } - if (isSchemaConfigured()) { - SchemaConfigured.resetSchemaMetricsConf(reader); - passSchemaMetricsTo(reader); - } - computeHDFSBlockDistribution(); // Load up indices and fileinfo. This also loads Bloom filter type. @@ -1287,7 +1279,7 @@ public class StoreFile extends SchemaConfigured { /** * Reader for a StoreFile. */ - public static class Reader extends SchemaConfigured { + public static class Reader { static final Log LOG = LogFactory.getLog(Reader.class.getName()); protected BloomFilter generalBloomFilter = null; @@ -1301,7 +1293,6 @@ public class StoreFile extends SchemaConfigured { public Reader(FileSystem fs, Path path, CacheConfig cacheConf, DataBlockEncoding preferredEncodingInCache) throws IOException { - super(path); reader = HFile.createReaderWithEncoding(fs, path, cacheConf, preferredEncodingInCache); bloomFilterType = BloomType.NONE; @@ -1310,7 +1301,6 @@ public class StoreFile extends SchemaConfigured { public Reader(FileSystem fs, Path path, HFileLink hfileLink, long size, CacheConfig cacheConf, DataBlockEncoding preferredEncodingInCache, boolean closeIStream) throws IOException { - super(path); FSDataInputStream in = hfileLink.open(fs); FSDataInputStream inNoChecksum = in; @@ -1584,7 +1574,6 @@ public class StoreFile extends SchemaConfigured { && bloomFilter.contains(key, 0, key.length, bloom); } - getSchemaMetrics().updateBloomMetrics(exists); return exists; } } catch (IOException e) { @@ -1728,10 +1717,6 @@ public class StoreFile extends SchemaConfigured { return reader.indexSize(); } - public String getColumnFamilyName() { - return reader.getColumnFamilyName(); - } - public BloomType getBloomFilterType() { return this.bloomFilterType; } @@ -1774,11 +1759,6 @@ public class StoreFile extends SchemaConfigured { public long getMaxTimestamp() { return timeRangeTracker.maximumTimestamp; } - - @Override - public void schemaConfigurationChanged() { - passSchemaMetricsTo((SchemaConfigured) reader); - } } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index b595c066bac..4ccdbccff36 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -33,8 +33,6 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.regionserver.HStore.ScanInfo; -import org.apache.hadoop.hbase.regionserver.metrics.RegionMetricsStorage; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -110,7 +108,6 @@ public class StoreScanner extends NonLazyKeyValueScanner throws IOException { this(store, scan.getCacheBlocks(), scan, columns, scanInfo.getTtl(), scanInfo.getMinVersions()); - initializeMetricNames(); if (columns != null && 
scan.isRaw()) { throw new DoNotRetryIOException( "Cannot specify any column for a raw scan"); @@ -163,7 +160,6 @@ public class StoreScanner extends NonLazyKeyValueScanner long smallestReadPoint, long earliestPutTs) throws IOException { this(store, false, scan, null, scanInfo.getTtl(), scanInfo.getMinVersions()); - initializeMetricNames(); matcher = new ScanQueryMatcher(scan, scanInfo, null, scanType, smallestReadPoint, earliestPutTs, oldestUnexpiredTS); @@ -194,7 +190,6 @@ public class StoreScanner extends NonLazyKeyValueScanner throws IOException { this(null, scan.getCacheBlocks(), scan, columns, scanInfo.getTtl(), scanInfo.getMinVersions()); - this.initializeMetricNames(); this.matcher = new ScanQueryMatcher(scan, scanInfo, columns, scanType, Long.MAX_VALUE, earliestPutTs, oldestUnexpiredTS); @@ -205,23 +200,6 @@ public class StoreScanner extends NonLazyKeyValueScanner heap = new KeyValueHeap(scanners, scanInfo.getComparator()); } - /** - * Method used internally to initialize metric names throughout the - * constructors. - * - * To be called after the store variable has been initialized! - */ - private void initializeMetricNames() { - String tableName = SchemaMetrics.UNKNOWN; - String family = SchemaMetrics.UNKNOWN; - if (store != null) { - tableName = store.getTableName(); - family = Bytes.toString(store.getFamily().getName()); - } - this.metricNamePrefix = - SchemaMetrics.generateSchemaMetricsPrefix(tableName, family); - } - /** * Get a filtered list of scanners. Assumes we are not in a compaction. * @return list of scanners to seek @@ -458,8 +436,7 @@ public class StoreScanner extends NonLazyKeyValueScanner } } finally { if (cumulativeMetric > 0 && metric != null) { - RegionMetricsStorage.incrNumericMetric(this.metricNamePrefix + metric, - cumulativeMetric); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java index bee966800ca..80e4d5ee090 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java @@ -253,7 +253,6 @@ public class CompactionRequest implements Comparable, LOG.info(((completed) ? "completed" : "aborted") + " compaction: " + this + "; duration=" + StringUtils.formatTimeDiff(now, start)); if (completed) { - server.getMetrics().addCompaction(now - start, this.totalSize); // degenerate case: blocked regions require recursive enqueues if (s.getCompactPriority() <= 0) { server.compactSplitThread diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/OperationMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/OperationMetrics.java deleted file mode 100644 index bff18c33b13..00000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/OperationMetrics.java +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hbase.regionserver.metrics; - -import java.util.Set; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.client.Append; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Increment; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.util.Bytes; - -/** - * This class provides a simplified interface to expose time varying metrics - * about GET/DELETE/PUT/ICV operations on a region and on Column Families. All - * metrics are stored in {@link RegionMetricsStorage} and exposed to hadoop - * metrics through {@link RegionServerDynamicMetrics}. - */ -@InterfaceAudience.Private -public class OperationMetrics { - - private static final String DELETE_KEY = "delete_"; - private static final String PUT_KEY = "put_"; - private static final String GET_KEY = "get_"; - private static final String ICV_KEY = "incrementColumnValue_"; - private static final String INCREMENT_KEY = "increment_"; - private static final String MULTIPUT_KEY = "multiput_"; - private static final String MULTIDELETE_KEY = "multidelete_"; - private static final String APPEND_KEY = "append_"; - - /** Conf key controlling whether we should expose metrics.*/ - private static final String CONF_KEY = - "hbase.metrics.exposeOperationTimes"; - - private String tableName = null; - private String regionName = null; - private String regionMetrixPrefix = null; - private Configuration conf = null; - - - /** - * Create a new OperationMetrics - * @param conf The Configuration of the HRegion reporting operations coming in. - * @param regionInfo The region info - */ - public OperationMetrics(Configuration conf, HRegionInfo regionInfo) { - // Configure SchemaMetrics before trying to create a RegionOperationMetrics instance as - // RegionOperationMetrics relies on SchemaMetrics to do naming. - if (conf != null) { - SchemaMetrics.configureGlobally(conf); - - this.conf = conf; - if (regionInfo != null) { - this.tableName = regionInfo.getTableNameAsString(); - this.regionName = regionInfo.getEncodedName(); - } else { - this.tableName = SchemaMetrics.UNKNOWN; - this.regionName = SchemaMetrics.UNKNOWN; - } - this.regionMetrixPrefix = - SchemaMetrics.generateRegionMetricsPrefix(this.tableName, this.regionName); - } - } - - /** - * This is used in creating a testing HRegion where the regionInfo is unknown - * @param conf - */ - public OperationMetrics() { - this(null, null); - } - - - /** - * Update the stats associated with {@link HTable#put(java.util.List)}. - * - * @param columnFamilies Set of CF's this multiput is associated with - * @param value the time - */ - public void updateMultiPutMetrics(Set columnFamilies, long value) { - doUpdateTimeVarying(columnFamilies, MULTIPUT_KEY, value); - } - - /** - * Update the stats associated with {@link HTable#delete(java.util.List)}. 
- * - * @param columnFamilies Set of CF's this multidelete is associated with - * @param value the time - */ - public void updateMultiDeleteMetrics(Set columnFamilies, long value) { - doUpdateTimeVarying(columnFamilies, MULTIDELETE_KEY, value); - } - - /** - * Update the metrics associated with a {@link Get} - * - * @param columnFamilies - * Set of Column Families in this get. - * @param value - * the time - */ - public void updateGetMetrics(Set columnFamilies, long value) { - doUpdateTimeVarying(columnFamilies, GET_KEY, value); - } - - /** - * Update metrics associated with an {@link Increment} - * @param columnFamilies - * @param value - */ - public void updateIncrementMetrics(Set columnFamilies, long value) { - doUpdateTimeVarying(columnFamilies, INCREMENT_KEY, value); - } - - - /** - * Update the metrics associated with an {@link Append} - * @param columnFamilies - * @param value - */ - public void updateAppendMetrics(Set columnFamilies, long value) { - doUpdateTimeVarying(columnFamilies, APPEND_KEY, value); - } - - - /** - * Update the metrics associated with - * {@link HTable#incrementColumnValue(byte[], byte[], byte[], long)} - * - * @param columnFamily - * The single column family associated with an ICV - * @param value - * the time - */ - public void updateIncrementColumnValueMetrics(byte[] columnFamily, long value) { - String cfMetricPrefix = - SchemaMetrics.generateSchemaMetricsPrefix(this.tableName, Bytes.toString(columnFamily)); - doSafeIncTimeVarying(cfMetricPrefix, ICV_KEY, value); - doSafeIncTimeVarying(this.regionMetrixPrefix, ICV_KEY, value); - } - - /** - * update metrics associated with a {@link Put} - * - * @param columnFamilies - * Set of column families involved. - * @param value - * the time. - */ - public void updatePutMetrics(Set columnFamilies, long value) { - doUpdateTimeVarying(columnFamilies, PUT_KEY, value); - } - - /** - * update metrics associated with a {@link Delete} - * - * @param columnFamilies - * @param value - * the time. - */ - public void updateDeleteMetrics(Set columnFamilies, long value) { - doUpdateTimeVarying(columnFamilies, DELETE_KEY, value); - } - - /** - * This deletes all old metrics this instance has ever created or updated. - */ - public void closeMetrics() { - RegionMetricsStorage.clear(); - } - - /** - * Method to send updates for cf and region metrics. This is the normal method - * used if the naming of stats and CF's are in line with put/delete/multiput. - * - * @param columnFamilies - * the set of column families involved. - * @param key - * the metric name. - * @param value - * the time. 
- */ - private void doUpdateTimeVarying(Set columnFamilies, String key, long value) { - String cfPrefix = null; - if (columnFamilies != null) { - cfPrefix = SchemaMetrics.generateSchemaMetricsPrefix(tableName, columnFamilies); - } else { - cfPrefix = SchemaMetrics.generateSchemaMetricsPrefix(tableName, SchemaMetrics.UNKNOWN); - } - - doSafeIncTimeVarying(cfPrefix, key, value); - doSafeIncTimeVarying(this.regionMetrixPrefix, key, value); - } - - private void doSafeIncTimeVarying(String prefix, String key, long value) { - if (conf.getBoolean(CONF_KEY, true)) { - if (prefix != null && !prefix.isEmpty() && key != null && !key.isEmpty()) { - String m = prefix + key; - RegionMetricsStorage.incrTimeVaryingMetric(m, value); - } - } - } - -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionMetricsStorage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionMetricsStorage.java deleted file mode 100644 index 5d4beffc2e2..00000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionMetricsStorage.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ - -package org.apache.hadoop.hbase.regionserver.metrics; - -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.util.Pair; - -/** - * This class if for maintaining the maps used to power metrics for hfiles, - * regions, and regionservers. It has methods to mutate and get state of metrics - * numbers. These numbers are exposed to Hadoop metrics through - * RegionServerDynamicMetrics. - */ -@InterfaceAudience.Private -public class RegionMetricsStorage { - - // for simple numeric metrics (# of blocks read from block cache) - private static final ConcurrentMap numericMetrics = - new ConcurrentHashMap(); - - // for simple numeric metrics (current block cache size) - // These ones are not reset to zero when queried, unlike the previous. - private static final ConcurrentMap numericPersistentMetrics = - new ConcurrentHashMap(); - - /** - * Used for metrics where we want track a metrics (such as latency) over a - * number of operations. 
- */ - private static final ConcurrentMap> timeVaryingMetrics = - new ConcurrentHashMap>(); - - public static Map getNumericMetrics() { - return numericMetrics; - } - - public static Map getNumericPersistentMetrics() { - return numericPersistentMetrics; - } - - public static Map> getTimeVaryingMetrics() { - return timeVaryingMetrics; - } - - public static void incrNumericMetric(String key, long amount) { - AtomicLong oldVal = numericMetrics.get(key); - if (oldVal == null) { - oldVal = numericMetrics.putIfAbsent(key, new AtomicLong(amount)); - if (oldVal == null) - return; - } - oldVal.addAndGet(amount); - } - - public static void incrTimeVaryingMetric(String key, long amount) { - Pair oldVal = timeVaryingMetrics.get(key); - if (oldVal == null) { - oldVal = - timeVaryingMetrics.putIfAbsent(key, - new Pair( - new AtomicLong(amount), - new AtomicInteger(1))); - if (oldVal == null) - return; - } - oldVal.getFirst().addAndGet(amount); // total time - oldVal.getSecond().incrementAndGet(); // increment ops by 1 - } - - public static void incrNumericPersistentMetric(String key, long amount) { - AtomicLong oldVal = numericPersistentMetrics.get(key); - if (oldVal == null) { - oldVal = numericPersistentMetrics.putIfAbsent(key, new AtomicLong(amount)); - if (oldVal == null) - return; - } - oldVal.addAndGet(amount); - } - - public static void setNumericMetric(String key, long amount) { - numericMetrics.put(key, new AtomicLong(amount)); - } - - public static long getNumericMetric(String key) { - AtomicLong m = numericMetrics.get(key); - if (m == null) - return 0; - return m.get(); - } - - public static Pair getTimeVaryingMetric(String key) { - Pair pair = timeVaryingMetrics.get(key); - if (pair == null) { - return new Pair(0L, 0); - } - - return new Pair(pair.getFirst().get(), pair.getSecond().get()); - } - - public static long getNumericPersistentMetric(String key) { - AtomicLong m = numericPersistentMetrics.get(key); - if (m == null) - return 0; - return m.get(); - } - - /** - * Clear all copies of the metrics this stores. - */ - public static void clear() { - timeVaryingMetrics.clear(); - numericMetrics.clear(); - numericPersistentMetrics.clear(); - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java deleted file mode 100644 index bb06a10d3f4..00000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java +++ /dev/null @@ -1,230 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.regionserver.metrics; - -import java.lang.reflect.Field; -import java.lang.reflect.Method; -import java.util.Map; -import java.util.Map.Entry; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.metrics.MetricsContext; -import org.apache.hadoop.metrics.MetricsRecord; -import org.apache.hadoop.metrics.MetricsUtil; -import org.apache.hadoop.metrics.Updater; -import org.apache.hadoop.metrics.util.MetricsBase; -import org.apache.hadoop.metrics.util.MetricsLongValue; -import org.apache.hadoop.metrics.util.MetricsRegistry; -import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; - -/** - * - * This class is for maintaining the various RPC statistics - * and publishing them through the metrics interfaces. - * This also registers the JMX MBean for RPC. - *

    - * This class has a number of metrics variables that are publicly accessible; - * these variables (objects) have methods to update their values; - * for example: rpcQueueTime.inc(time) - * - */ -@InterfaceAudience.Private -public class RegionServerDynamicMetrics implements Updater { - private static final String UNABLE_TO_CLEAR = "Unable to clear RegionServerDynamicMetrics"; - - private MetricsRecord metricsRecord; - private MetricsContext context; - private final RegionServerDynamicStatistics rsDynamicStatistics; - private Method updateMbeanInfoIfMetricsListChanged = null; - private static final Log LOG = - LogFactory.getLog(RegionServerDynamicStatistics.class); - - private boolean reflectionInitialized = false; - private boolean needsUpdateMessage = false; - private Field recordMetricMapField; - private Field registryMetricMapField; - - /** - * The metrics variables are public: - * - they can be set directly by calling their set/inc methods - * -they can also be read directly - e.g. JMX does this. - */ - public final MetricsRegistry registry = new MetricsRegistry(); - - private RegionServerDynamicMetrics() { - this.context = MetricsUtil.getContext("hbase-dynamic"); - this.metricsRecord = MetricsUtil.createRecord( - this.context, - "RegionServerDynamicStatistics"); - context.registerUpdater(this); - this.rsDynamicStatistics = new RegionServerDynamicStatistics(this.registry); - try { - updateMbeanInfoIfMetricsListChanged = - this.rsDynamicStatistics.getClass().getSuperclass() - .getDeclaredMethod("updateMbeanInfoIfMetricsListChanged", - new Class[]{}); - updateMbeanInfoIfMetricsListChanged.setAccessible(true); - } catch (Exception e) { - LOG.error(e); - } - } - - public static RegionServerDynamicMetrics newInstance() { - RegionServerDynamicMetrics metrics = - new RegionServerDynamicMetrics(); - return metrics; - } - - public synchronized void setNumericMetric(String name, long amt) { - MetricsLongValue m = (MetricsLongValue)registry.get(name); - if (m == null) { - m = new MetricsLongValue(name, this.registry); - this.needsUpdateMessage = true; - } - m.set(amt); - } - - public synchronized void incrTimeVaryingMetric( - String name, - long amt, - int numOps) { - MetricsTimeVaryingRate m = (MetricsTimeVaryingRate)registry.get(name); - if (m == null) { - m = new MetricsTimeVaryingRate(name, this.registry); - this.needsUpdateMessage = true; - } - if (numOps > 0) { - m.inc(numOps, amt); - } - } - - /** - * Clear all metrics this exposes. - * Uses reflection to clear them from hadoop metrics side as well. - */ - @SuppressWarnings("rawtypes") - public void clear() { - this.needsUpdateMessage = true; - // If this is the first clear use reflection to get the two maps that hold copies of our - // metrics on the hadoop metrics side. We have to use reflection because there is not - // remove metrics on the hadoop side. If we can't get them then clearing old metrics - // is not possible and bailing out early is our best option. 
- if (!this.reflectionInitialized) { - this.reflectionInitialized = true; - try { - this.recordMetricMapField = this.metricsRecord.getClass().getDeclaredField("metricTable"); - this.recordMetricMapField.setAccessible(true); - } catch (SecurityException e) { - LOG.debug(UNABLE_TO_CLEAR); - return; - } catch (NoSuchFieldException e) { - LOG.debug(UNABLE_TO_CLEAR); - return; - } - - try { - this.registryMetricMapField = this.registry.getClass().getDeclaredField("metricsList"); - this.registryMetricMapField.setAccessible(true); - } catch (SecurityException e) { - LOG.debug(UNABLE_TO_CLEAR); - return; - } catch (NoSuchFieldException e) { - LOG.debug(UNABLE_TO_CLEAR); - return; - } - } - - - //If we found both fields then try and clear the maps. - if (this.recordMetricMapField != null && this.registryMetricMapField != null) { - try { - Map recordMap = (Map) this.recordMetricMapField.get(this.metricsRecord); - recordMap.clear(); - Map registryMap = (Map) this.registryMetricMapField.get(this.registry); - registryMap.clear(); - } catch (IllegalArgumentException e) { - LOG.debug(UNABLE_TO_CLEAR); - } catch (IllegalAccessException e) { - LOG.debug(UNABLE_TO_CLEAR); - } - } else { - LOG.debug(UNABLE_TO_CLEAR); - } - } - - /** - * Push the metrics to the monitoring subsystem on doUpdate() call. - * @param context ctx - */ - public void doUpdates(MetricsContext context) { - /* get dynamically created numeric metrics, and push the metrics */ - for (Entry entry : RegionMetricsStorage.getNumericMetrics().entrySet()) { - this.setNumericMetric(entry.getKey(), entry.getValue().getAndSet(0)); - } - /* get dynamically created numeric metrics, and push the metrics. - * These ones aren't to be reset; they are cumulative. */ - for (Entry entry : RegionMetricsStorage.getNumericPersistentMetrics().entrySet()) { - this.setNumericMetric(entry.getKey(), entry.getValue().get()); - } - /* get dynamically created time varying metrics, and push the metrics */ - for (Entry> entry : - RegionMetricsStorage.getTimeVaryingMetrics().entrySet()) { - Pair value = entry.getValue(); - this.incrTimeVaryingMetric(entry.getKey(), - value.getFirst().getAndSet(0), - value.getSecond().getAndSet(0)); - } - - // If there are new metrics sending this message to jmx tells it to update everything. - // This is not ideal we should just move to metrics2 that has full support for dynamic metrics. - if (needsUpdateMessage) { - try { - if (updateMbeanInfoIfMetricsListChanged != null) { - updateMbeanInfoIfMetricsListChanged.invoke(this.rsDynamicStatistics, - new Object[]{}); - } - } catch (Exception e) { - LOG.error(e); - } - needsUpdateMessage = false; - } - - - synchronized (registry) { - // Iterate through the registry to propagate the different rpc metrics. 
- for (String metricName : registry.getKeyList() ) { - MetricsBase value = registry.get(metricName); - value.pushMetric(metricsRecord); - } - } - metricsRecord.update(); - } - - public void shutdown() { - if (rsDynamicStatistics != null) - rsDynamicStatistics.shutdown(); - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java deleted file mode 100644 index b4df6a76e63..00000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.regionserver.metrics; - -import javax.management.ObjectName; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.metrics.MetricsMBeanBase; -import org.apache.hadoop.metrics.util.MBeanUtil; -import org.apache.hadoop.metrics.util.MetricsRegistry; - -/** - * Exports dynamic region server metric recorded in - * {@link RegionServerDynamicMetrics} as an MBean - * for JMX monitoring. - */ -@InterfaceAudience.Private -public class RegionServerDynamicStatistics extends MetricsMBeanBase { - private final ObjectName mbeanName; - - public RegionServerDynamicStatistics(MetricsRegistry registry) { - super(registry, "RegionServerDynamicStatistics"); - mbeanName = MBeanUtil.registerMBean("RegionServer", "RegionServerDynamicStatistics", this); - } - - public void shutdown() { - if (mbeanName != null) - MBeanUtil.unregisterMBean(mbeanName); - } - -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java deleted file mode 100644 index d8883e93062..00000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java +++ /dev/null @@ -1,626 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.regionserver.metrics; - -import java.io.IOException; -import java.lang.management.ManagementFactory; -import java.lang.management.MemoryUsage; -import java.util.List; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.io.hfile.HFile; -import org.apache.hadoop.hbase.metrics.ExactCounterMetric; -import org.apache.hadoop.hbase.metrics.HBaseInfo; -import org.apache.hadoop.hbase.metrics.MetricsRate; -import org.apache.hadoop.hbase.metrics.histogram.MetricsHistogram; -import org.apache.hadoop.hbase.metrics.PersistentMetricsTimeVaryingRate; -import com.yammer.metrics.stats.Snapshot; -import org.apache.hadoop.hbase.regionserver.wal.HLogMetrics; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.Strings; -import org.apache.hadoop.metrics.ContextFactory; -import org.apache.hadoop.metrics.MetricsContext; -import org.apache.hadoop.metrics.MetricsRecord; -import org.apache.hadoop.metrics.MetricsUtil; -import org.apache.hadoop.metrics.Updater; -import org.apache.hadoop.metrics.jvm.JvmMetrics; -import org.apache.hadoop.metrics.util.MetricsIntValue; -import org.apache.hadoop.metrics.util.MetricsLongValue; -import org.apache.hadoop.metrics.util.MetricsRegistry; -import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; -import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong; -import org.apache.hadoop.util.StringUtils; - -/** - * This class is for maintaining the various regionserver statistics - * and publishing them through the metrics interfaces. - *

    - * This class has a number of metrics variables that are publicly accessible; - * these variables (objects) have methods to update their values. - */ -@InterfaceAudience.Private -public class RegionServerMetrics implements Updater { - @SuppressWarnings({"FieldCanBeLocal"}) - private final Log LOG = LogFactory.getLog(this.getClass()); - private final MetricsRecord metricsRecord; - private long lastUpdate = System.currentTimeMillis(); - private long lastExtUpdate = System.currentTimeMillis(); - private long extendedPeriod = 0; - private static final int MB = 1024*1024; - private MetricsRegistry registry = new MetricsRegistry(); - private final RegionServerStatistics statistics; - - public final MetricsTimeVaryingRate atomicIncrementTime = - new MetricsTimeVaryingRate("atomicIncrementTime", registry); - - /** - * Count of regions carried by this regionserver - */ - public final MetricsIntValue regions = - new MetricsIntValue("regions", registry); - - /** - * Block cache size. - */ - public final MetricsLongValue blockCacheSize = - new MetricsLongValue("blockCacheSize", registry); - - /** - * Block cache free size. - */ - public final MetricsLongValue blockCacheFree = - new MetricsLongValue("blockCacheFree", registry); - - /** - * Block cache item count. - */ - public final MetricsLongValue blockCacheCount = - new MetricsLongValue("blockCacheCount", registry); - - /** - * Block cache hit count. - */ - public final MetricsLongValue blockCacheHitCount = - new MetricsLongValue("blockCacheHitCount", registry); - - /** - * Block cache miss count. - */ - public final MetricsLongValue blockCacheMissCount = - new MetricsLongValue("blockCacheMissCount", registry); - - /** - * Block cache evict count. - */ - public final MetricsLongValue blockCacheEvictedCount = - new MetricsLongValue("blockCacheEvictedCount", registry); - - /** - * Block hit ratio. - */ - public final MetricsIntValue blockCacheHitRatio = - new MetricsIntValue("blockCacheHitRatio", registry); - - /** - * Block hit caching ratio. This only includes the requests to the block - * cache where caching was turned on. See HBASE-2253. - */ - public final MetricsIntValue blockCacheHitCachingRatio = - new MetricsIntValue("blockCacheHitCachingRatio", registry); - - /** Block hit ratio for past N periods. */ - public final MetricsIntValue blockCacheHitRatioPastNPeriods = new MetricsIntValue("blockCacheHitRatioPastNPeriods", registry); - - /** Block hit caching ratio for past N periods */ - public final MetricsIntValue blockCacheHitCachingRatioPastNPeriods = new MetricsIntValue("blockCacheHitCachingRatioPastNPeriods", registry); - - /* - * Count of requests to the regionservers since last call to metrics update - */ - public final MetricsRate requests = new MetricsRate("requests", registry); - - /** - * Count of stores open on the regionserver. - */ - public final MetricsIntValue stores = new MetricsIntValue("stores", registry); - - /** - * Count of storefiles open on the regionserver. 
- */ - public final MetricsIntValue storefiles = - new MetricsIntValue("storefiles", registry); - - /** - * Count of read requests - */ - public final MetricsLongValue readRequestsCount = - new MetricsLongValue("readRequestsCount", registry); - - /** - * Count of write requests - */ - public final MetricsLongValue writeRequestsCount = - new MetricsLongValue("writeRequestsCount", registry); - - /** - * Count of checkAndMutates the failed the check - */ - public final MetricsLongValue checkAndMutateChecksFailed = - new MetricsLongValue("checkAndMutateChecksFailed", registry); - - /** - * Count of checkAndMutates that passed the check - */ - public final MetricsLongValue checkAndMutateChecksPassed = - new MetricsLongValue("checkAndMutateChecksPassed", registry); - /** - */ - public final MetricsIntValue storefileIndexSizeMB = - new MetricsIntValue("storefileIndexSizeMB", registry); - - /** The total size of block index root levels in this regionserver in KB. */ - public final MetricsIntValue rootIndexSizeKB = - new MetricsIntValue("rootIndexSizeKB", registry); - - /** Total size of all block indexes (not necessarily loaded in memory) */ - public final MetricsIntValue totalStaticIndexSizeKB = - new MetricsIntValue("totalStaticIndexSizeKB", registry); - - /** Total size of all Bloom filters (not necessarily loaded in memory) */ - public final MetricsIntValue totalStaticBloomSizeKB = - new MetricsIntValue("totalStaticBloomSizeKB", registry); - - /** - * HDFS blocks locality index - */ - public final MetricsIntValue hdfsBlocksLocalityIndex = - new MetricsIntValue("hdfsBlocksLocalityIndex", registry); - - /** - * Sum of all the memstore sizes in this regionserver in MB - */ - public final MetricsIntValue memstoreSizeMB = - new MetricsIntValue("memstoreSizeMB", registry); - - /** - * Number of put with WAL disabled in this regionserver in MB - */ - public final MetricsLongValue numPutsWithoutWAL = - new MetricsLongValue("numPutsWithoutWAL", registry); - - /** - * Possible data loss sizes (due to put with WAL disabled) in this regionserver in MB - */ - public final MetricsIntValue mbInMemoryWithoutWAL = - new MetricsIntValue("mbInMemoryWithoutWAL", registry); - - /** - * Size of the compaction queue. - */ - public final MetricsIntValue compactionQueueSize = - new MetricsIntValue("compactionQueueSize", registry); - - /** - * Size of the flush queue. 
- */ - public final MetricsIntValue flushQueueSize = - new MetricsIntValue("flushQueueSize", registry); - - /** - * filesystem sequential read latency distribution - */ - public final MetricsHistogram fsReadLatencyHistogram = - new MetricsHistogram("fsReadLatencyHistogram", registry); - - /** - * filesystem pread latency distribution - */ - public final MetricsHistogram fsPreadLatencyHistogram = - new MetricsHistogram("fsPreadLatencyHistogram", registry); - - /** - * Metrics on the distribution of filesystem write latencies (improved version of fsWriteLatency) - */ - public final MetricsHistogram fsWriteLatencyHistogram = - new MetricsHistogram("fsWriteLatencyHistogram", registry); - - - /** - * filesystem read latency - */ - public final MetricsTimeVaryingRate fsReadLatency = - new MetricsTimeVaryingRate("fsReadLatency", registry); - - /** - * filesystem positional read latency - */ - public final MetricsTimeVaryingRate fsPreadLatency = - new MetricsTimeVaryingRate("fsPreadLatency", registry); - - /** - * filesystem write latency - */ - public final MetricsTimeVaryingRate fsWriteLatency = - new MetricsTimeVaryingRate("fsWriteLatency", registry); - - /** - * size (in bytes) of data in HLog append calls - */ - public final MetricsTimeVaryingRate fsWriteSize = - new MetricsTimeVaryingRate("fsWriteSize", registry); - - /** - * filesystem sync latency - */ - public final MetricsTimeVaryingRate fsSyncLatency = - new MetricsTimeVaryingRate("fsSyncLatency", registry); - - - /** - * time each scheduled compaction takes - */ - protected final MetricsHistogram compactionTime = - new MetricsHistogram("compactionTime", registry); - - protected final MetricsHistogram compactionSize = - new MetricsHistogram("compactionSize", registry); - - /** - * time each scheduled flush takes - */ - protected final MetricsHistogram flushTime = - new MetricsHistogram("flushTime", registry); - - protected final MetricsHistogram flushSize = - new MetricsHistogram("flushSize", registry); - - public final MetricsLongValue slowHLogAppendCount = - new MetricsLongValue("slowHLogAppendCount", registry); - - public final MetricsTimeVaryingRate slowHLogAppendTime = - new MetricsTimeVaryingRate("slowHLogAppendTime", registry); - - public final PersistentMetricsTimeVaryingRate regionSplitSuccessCount = - new PersistentMetricsTimeVaryingRate("regionSplitSuccessCount", registry); - - public final MetricsLongValue regionSplitFailureCount = - new MetricsLongValue("regionSplitFailureCount", registry); - - /** - * Number of times checksum verification failed. - */ - public final MetricsLongValue checksumFailuresCount = - new MetricsLongValue("checksumFailuresCount", registry); - - /** - * time blocked on lack of resources - */ - public final MetricsHistogram updatesBlockedSeconds = new MetricsHistogram( - "updatesBlockedSeconds", registry); - - /** - * time blocked on memstoreHW - */ - public final MetricsHistogram updatesBlockedSecondsHighWater = new MetricsHistogram( - "updatesBlockedSecondsHighWater",registry); - - public RegionServerMetrics() { - MetricsContext context = MetricsUtil.getContext("hbase"); - metricsRecord = MetricsUtil.createRecord(context, "regionserver"); - String name = Thread.currentThread().getName(); - metricsRecord.setTag("RegionServer", name); - context.registerUpdater(this); - // Add jvmmetrics. 
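The deleted constructor above follows the hadoop metrics-v1 "registered updater" contract: look up a context, create a named record, tag it, and register the object so the framework polls it periodically. A minimal self-contained sketch of that pattern, using only the calls visible in the deleted code (the class name is hypothetical, for illustration only):

    import org.apache.hadoop.metrics.MetricsContext;
    import org.apache.hadoop.metrics.MetricsRecord;
    import org.apache.hadoop.metrics.MetricsUtil;
    import org.apache.hadoop.metrics.Updater;

    public class UpdaterSketch implements Updater {
      private final MetricsRecord record;

      public UpdaterSketch() {
        // Same sequence as the deleted RegionServerMetrics constructor:
        // look up the "hbase" context, create a "regionserver" record,
        // then register for periodic doUpdates() callbacks.
        MetricsContext context = MetricsUtil.getContext("hbase");
        record = MetricsUtil.createRecord(context, "regionserver");
        record.setTag("RegionServer", Thread.currentThread().getName());
        context.registerUpdater(this);
      }

      @Override
      public void doUpdates(MetricsContext caller) {
        // Metrics staged on the record are pushed on every poll period.
        record.update();
      }
    }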
- JvmMetrics.init("RegionServer", name); - // Add Hbase Info metrics - HBaseInfo.init(); - - // export for JMX - statistics = new RegionServerStatistics(this.registry, name); - - // get custom attributes - try { - Object m = ContextFactory.getFactory().getAttribute("hbase.extendedperiod"); - if (m instanceof String) { - this.extendedPeriod = Long.parseLong((String) m)*1000; - } - } catch (IOException ioe) { - LOG.info("Couldn't load ContextFactory for Metrics config info"); - } - - LOG.info("Initialized"); - } - - public void shutdown() { - if (statistics != null) - statistics.shutdown(); - } - - /** - * Since this object is a registered updater, this method will be called - * periodically, e.g. every 5 seconds. - * @param caller the metrics context that is responsible for calling us - */ - public void doUpdates(MetricsContext caller) { - synchronized (this) { - this.lastUpdate = System.currentTimeMillis(); - - // has the extended period for long-living stats elapsed? - if (this.extendedPeriod > 0 && - this.lastUpdate - this.lastExtUpdate >= this.extendedPeriod) { - this.lastExtUpdate = this.lastUpdate; - this.compactionTime.clear(); - this.compactionSize.clear(); - this.flushTime.clear(); - this.flushSize.clear(); - this.resetAllMinMax(); - } - - this.stores.pushMetric(this.metricsRecord); - this.storefiles.pushMetric(this.metricsRecord); - this.storefileIndexSizeMB.pushMetric(this.metricsRecord); - this.rootIndexSizeKB.pushMetric(this.metricsRecord); - this.totalStaticIndexSizeKB.pushMetric(this.metricsRecord); - this.totalStaticBloomSizeKB.pushMetric(this.metricsRecord); - this.memstoreSizeMB.pushMetric(this.metricsRecord); - this.mbInMemoryWithoutWAL.pushMetric(this.metricsRecord); - this.numPutsWithoutWAL.pushMetric(this.metricsRecord); - this.readRequestsCount.pushMetric(this.metricsRecord); - this.writeRequestsCount.pushMetric(this.metricsRecord); - this.regions.pushMetric(this.metricsRecord); - this.requests.pushMetric(this.metricsRecord); - this.compactionQueueSize.pushMetric(this.metricsRecord); - this.flushQueueSize.pushMetric(this.metricsRecord); - this.blockCacheSize.pushMetric(this.metricsRecord); - this.blockCacheFree.pushMetric(this.metricsRecord); - this.blockCacheCount.pushMetric(this.metricsRecord); - this.blockCacheHitCount.pushMetric(this.metricsRecord); - this.blockCacheMissCount.pushMetric(this.metricsRecord); - this.blockCacheEvictedCount.pushMetric(this.metricsRecord); - this.blockCacheHitRatio.pushMetric(this.metricsRecord); - this.blockCacheHitCachingRatio.pushMetric(this.metricsRecord); - this.hdfsBlocksLocalityIndex.pushMetric(this.metricsRecord); - this.blockCacheHitRatioPastNPeriods.pushMetric(this.metricsRecord); - this.blockCacheHitCachingRatioPastNPeriods.pushMetric(this.metricsRecord); - - // Mix in HFile and HLog metrics - // Be careful. Here is code for MTVR from up in hadoop: - // public synchronized void inc(final int numOps, final long time) { - // currentData.numOperations += numOps; - // currentData.time += time; - // long timePerOps = time/numOps; - // minMax.update(timePerOps); - // } - // Means you can't pass a numOps of zero, or you get an ArithmeticException (/ by zero).
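The caveat spelled out in the comment above is why the callers below guard with a nonzero operation count before calling inc(). A hedged sketch of that guard (the helper class is made up; the inc signature is the one quoted in the comment):

    import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;

    final class MtvrGuard {
      // inc(numOps, time) computes time / numOps internally, so a zero
      // numOps must never reach it or it throws ArithmeticException.
      static void safeInc(MetricsTimeVaryingRate rate, int numOps, long timeMs) {
        if (numOps != 0) {
          rate.inc(numOps, timeMs);
        }
      }
    }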
- // HLog metrics - addHLogMetric(HLogMetrics.getWriteTime(), this.fsWriteLatency); - addHLogMetric(HLogMetrics.getWriteSize(), this.fsWriteSize); - addHLogMetric(HLogMetrics.getSyncTime(), this.fsSyncLatency); - addHLogMetric(HLogMetrics.getSlowAppendTime(), this.slowHLogAppendTime); - this.slowHLogAppendCount.set(HLogMetrics.getSlowAppendCount()); - // HFile metrics, sequential reads - int ops = HFile.getReadOps(); - if (ops != 0) this.fsReadLatency.inc(ops, HFile.getReadTimeMs()); - // HFile metrics, positional reads - ops = HFile.getPreadOps(); - if (ops != 0) this.fsPreadLatency.inc(ops, HFile.getPreadTimeMs()); - this.checksumFailuresCount.set(HFile.getChecksumFailuresCount()); - - /* NOTE: removed HFile write latency. 2 reasons: - * 1) Mixing HLog latencies are far higher priority since they're - * on-demand and HFile is used in background (compact/flush) - * 2) HFile metrics are being handled at a higher level - * by compaction & flush metrics. - */ - - for(Long latency : HFile.getReadLatenciesNanos()) { - this.fsReadLatencyHistogram.update(latency); - } - for(Long latency : HFile.getPreadLatenciesNanos()) { - this.fsPreadLatencyHistogram.update(latency); - } - for(Long latency : HFile.getWriteLatenciesNanos()) { - this.fsWriteLatencyHistogram.update(latency); - } - - - // push the result - this.fsPreadLatency.pushMetric(this.metricsRecord); - this.fsReadLatency.pushMetric(this.metricsRecord); - this.fsWriteLatency.pushMetric(this.metricsRecord); - this.fsWriteSize.pushMetric(this.metricsRecord); - - this.fsReadLatencyHistogram.pushMetric(this.metricsRecord); - this.fsWriteLatencyHistogram.pushMetric(this.metricsRecord); - this.fsPreadLatencyHistogram.pushMetric(this.metricsRecord); - - this.fsSyncLatency.pushMetric(this.metricsRecord); - this.compactionTime.pushMetric(this.metricsRecord); - this.compactionSize.pushMetric(this.metricsRecord); - this.flushTime.pushMetric(this.metricsRecord); - this.flushSize.pushMetric(this.metricsRecord); - this.slowHLogAppendCount.pushMetric(this.metricsRecord); - this.regionSplitSuccessCount.pushMetric(this.metricsRecord); - this.regionSplitFailureCount.pushMetric(this.metricsRecord); - this.checksumFailuresCount.pushMetric(this.metricsRecord); - this.updatesBlockedSeconds.pushMetric(this.metricsRecord); - this.updatesBlockedSecondsHighWater.pushMetric(this.metricsRecord); - } - this.metricsRecord.update(); - } - - private void addHLogMetric(HLogMetrics.Metric logMetric, - MetricsTimeVaryingRate hadoopMetric) { - if (logMetric.count > 0) - hadoopMetric.inc(logMetric.min); - if (logMetric.count > 1) - hadoopMetric.inc(logMetric.max); - if (logMetric.count > 2) { - int ops = logMetric.count - 2; - hadoopMetric.inc(ops, logMetric.total - logMetric.max - logMetric.min); - } - } - - public void resetAllMinMax() { - this.atomicIncrementTime.resetMinMax(); - this.fsReadLatency.resetMinMax(); - this.fsWriteLatency.resetMinMax(); - this.fsWriteSize.resetMinMax(); - this.fsSyncLatency.resetMinMax(); - this.slowHLogAppendTime.resetMinMax(); - } - - /** - * @return Count of requests. 
- */ - public float getRequests() { - return this.requests.getPreviousIntervalValue(); - } - - /** - * @param time time that compaction took - * @param size bytesize of storefiles in the compaction - */ - public synchronized void addCompaction(long time, long size) { - this.compactionTime.update(time); - this.compactionSize.update(size); - } - - /** - * @param flushes history in <time, size> - */ - public synchronized void addFlush(final List<Pair<Long, Long>> flushes) { - for (Pair<Long, Long> f : flushes) { - this.flushTime.update(f.getFirst()); - this.flushSize.update(f.getSecond()); - } - } - - /** - * @param inc How much to add to requests. - */ - public void incrementRequests(final int inc) { - this.requests.inc(inc); - } - - public void incrementSplitSuccessCount(long time) { - this.regionSplitSuccessCount.inc(time); - } - - public void incrementSplitFailureCount() { - this.regionSplitFailureCount.set(this.regionSplitFailureCount.get() + 1); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb = Strings.appendKeyValue(sb, "requestsPerSecond", Integer - .valueOf((int) this.requests.getPreviousIntervalValue())); - sb = Strings.appendKeyValue(sb, "numberOfOnlineRegions", - Integer.valueOf(this.regions.get())); - sb = Strings.appendKeyValue(sb, "numberOfStores", - Integer.valueOf(this.stores.get())); - sb = Strings.appendKeyValue(sb, "numberOfStorefiles", - Integer.valueOf(this.storefiles.get())); - sb = Strings.appendKeyValue(sb, this.storefileIndexSizeMB.getName(), - Integer.valueOf(this.storefileIndexSizeMB.get())); - sb = Strings.appendKeyValue(sb, "rootIndexSizeKB", - Integer.valueOf(this.rootIndexSizeKB.get())); - sb = Strings.appendKeyValue(sb, "totalStaticIndexSizeKB", - Integer.valueOf(this.totalStaticIndexSizeKB.get())); - sb = Strings.appendKeyValue(sb, "totalStaticBloomSizeKB", - Integer.valueOf(this.totalStaticBloomSizeKB.get())); - sb = Strings.appendKeyValue(sb, this.memstoreSizeMB.getName(), - Integer.valueOf(this.memstoreSizeMB.get())); - sb = Strings.appendKeyValue(sb, "mbInMemoryWithoutWAL", - Integer.valueOf(this.mbInMemoryWithoutWAL.get())); - sb = Strings.appendKeyValue(sb, "numberOfPutsWithoutWAL", - Long.valueOf(this.numPutsWithoutWAL.get())); - sb = Strings.appendKeyValue(sb, "readRequestsCount", - Long.valueOf(this.readRequestsCount.get())); - sb = Strings.appendKeyValue(sb, "writeRequestsCount", - Long.valueOf(this.writeRequestsCount.get())); - sb = Strings.appendKeyValue(sb, "compactionQueueSize", - Integer.valueOf(this.compactionQueueSize.get())); - sb = Strings.appendKeyValue(sb, "flushQueueSize", - Integer.valueOf(this.flushQueueSize.get())); - // Duplicate from jvmmetrics because metrics are private there so - // inaccessible.
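As the deleted comment above notes, toString() reads heap numbers straight from the JVM because the JvmMetrics copies are private. A standalone sketch of that MemoryMXBean read (class name hypothetical, JDK API only):

    import java.lang.management.ManagementFactory;
    import java.lang.management.MemoryUsage;

    public class HeapUsageSketch {
      private static final int MB = 1024 * 1024;

      public static void main(String[] args) {
        // Same source the deleted toString() uses for usedHeapMB/maxHeapMB.
        MemoryUsage heap = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
        System.out.println("usedHeapMB=" + heap.getUsed() / MB
            + ", maxHeapMB=" + heap.getMax() / MB);
      }
    }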
- MemoryUsage memory = - ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); - sb = Strings.appendKeyValue(sb, "usedHeapMB", - Long.valueOf(memory.getUsed()/MB)); - sb = Strings.appendKeyValue(sb, "maxHeapMB", - Long.valueOf(memory.getMax()/MB)); - sb = Strings.appendKeyValue(sb, this.blockCacheSize.getName()+"MB", - StringUtils.limitDecimalTo2((float)this.blockCacheSize.get()/MB)); - sb = Strings.appendKeyValue(sb, this.blockCacheFree.getName()+"MB", - StringUtils.limitDecimalTo2((float)this.blockCacheFree.get()/MB)); - sb = Strings.appendKeyValue(sb, this.blockCacheCount.getName(), - Long.valueOf(this.blockCacheCount.get())); - sb = Strings.appendKeyValue(sb, this.blockCacheHitCount.getName(), - Long.valueOf(this.blockCacheHitCount.get())); - sb = Strings.appendKeyValue(sb, this.blockCacheMissCount.getName(), - Long.valueOf(this.blockCacheMissCount.get())); - sb = Strings.appendKeyValue(sb, this.blockCacheEvictedCount.getName(), - Long.valueOf(this.blockCacheEvictedCount.get())); - sb = Strings.appendKeyValue(sb, this.blockCacheHitRatio.getName(), - Long.valueOf(this.blockCacheHitRatio.get())+"%"); - sb = Strings.appendKeyValue(sb, this.blockCacheHitCachingRatio.getName(), - Long.valueOf(this.blockCacheHitCachingRatio.get())+"%"); - sb = Strings.appendKeyValue(sb, this.hdfsBlocksLocalityIndex.getName(), - Long.valueOf(this.hdfsBlocksLocalityIndex.get())); - sb = Strings.appendKeyValue(sb, "slowHLogAppendCount", - Long.valueOf(this.slowHLogAppendCount.get())); - sb = appendHistogram(sb, this.fsReadLatencyHistogram); - sb = appendHistogram(sb, this.fsPreadLatencyHistogram); - sb = appendHistogram(sb, this.fsWriteLatencyHistogram); - - return sb.toString(); - } - - private StringBuilder appendHistogram(StringBuilder sb, - MetricsHistogram histogram) { - sb = Strings.appendKeyValue(sb, - histogram.getName() + "Mean", - StringUtils.limitDecimalTo2(histogram.getMean())); - sb = Strings.appendKeyValue(sb, - histogram.getName() + "Count", - StringUtils.limitDecimalTo2(histogram.getCount())); - final Snapshot s = histogram.getSnapshot(); - sb = Strings.appendKeyValue(sb, - histogram.getName() + "Median", - StringUtils.limitDecimalTo2(s.getMedian())); - sb = Strings.appendKeyValue(sb, - histogram.getName() + "75th", - StringUtils.limitDecimalTo2(s.get75thPercentile())); - sb = Strings.appendKeyValue(sb, - histogram.getName() + "95th", - StringUtils.limitDecimalTo2(s.get95thPercentile())); - sb = Strings.appendKeyValue(sb, - histogram.getName() + "99th", - StringUtils.limitDecimalTo2(s.get99thPercentile())); - sb = Strings.appendKeyValue(sb, - histogram.getName() + "999th", - StringUtils.limitDecimalTo2(s.get999thPercentile())); - return sb; - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerStatistics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerStatistics.java deleted file mode 100644 index 37c978e825e..00000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerStatistics.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.regionserver.metrics; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.metrics.MetricsMBeanBase; -import org.apache.hadoop.metrics.util.MBeanUtil; -import org.apache.hadoop.metrics.util.MetricsRegistry; - -import javax.management.ObjectName; - -/** - * Exports metrics recorded by {@link RegionServerMetrics} as an MBean - * for JMX monitoring. - */ -@InterfaceAudience.Private -public class RegionServerStatistics extends MetricsMBeanBase { - - private final ObjectName mbeanName; - - public RegionServerStatistics(MetricsRegistry registry, String rsName) { - super(registry, "RegionServerStatistics"); - mbeanName = MBeanUtil.registerMBean("RegionServer", - "RegionServerStatistics", this); - } - - public void shutdown() { - if (mbeanName != null) - MBeanUtil.unregisterMBean(mbeanName); - } - -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaConfigured.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaConfigured.java deleted file mode 100644 index 525298468ef..00000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaConfigured.java +++ /dev/null @@ -1,275 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.regionserver.metrics; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.io.HeapSize; -import org.apache.hadoop.hbase.io.hfile.HFile; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.SchemaAware; -import org.apache.hadoop.hbase.util.ClassSize; - -/** - * A base class for objects that are associated with a particular table and - * column family. Provides a way to obtain the schema metrics object. - *

- * Due to the variety of things that can be associated with a table/CF, there - * are many ways to initialize this base class, either in the constructor, or - * from another similar object. For example, an HFile reader configures HFile - * blocks it reads with its own table/CF name. - */ -@InterfaceAudience.Private -public class SchemaConfigured implements HeapSize, SchemaAware { - private static final Log LOG = LogFactory.getLog(SchemaConfigured.class); - - // These are not final because we set them at runtime e.g. for HFile blocks. - private String cfName; - private String tableName; - - /** - * Schema metrics. Can only be initialized when we know our column family - * name, table name, and have had a chance to take a look at the - * configuration (in {@link SchemaMetrics#configureGlobally(Configuration)}) - * so we know whether we are using per-table metrics. Therefore, initialized - * lazily. We don't make this volatile because even if a thread sees a stale - * value of null, it will be re-initialized to the same value that other - * threads see. - */ - private SchemaMetrics schemaMetrics; - - static { - if (ClassSize.OBJECT <= 0 || ClassSize.REFERENCE <= 0) { - throw new AssertionError("Class sizes are not initialized"); - } - } - - /** - * Estimated heap size of this object. We don't count table name and column - * family name characters because these strings are shared among many - * objects. We need unaligned size to reuse this in subclasses. - */ - public static final int SCHEMA_CONFIGURED_UNALIGNED_HEAP_SIZE = - ClassSize.OBJECT + 3 * ClassSize.REFERENCE; - - private static final int SCHEMA_CONFIGURED_ALIGNED_HEAP_SIZE = - ClassSize.align(SCHEMA_CONFIGURED_UNALIGNED_HEAP_SIZE); - - /** A helper constructor that configures the "use table name" flag. */ - private SchemaConfigured(Configuration conf) { - SchemaMetrics.configureGlobally(conf); - // Even though we now know if table-level metrics are used, we can't - // initialize schemaMetrics yet, because CF and table name are only known - // to the calling constructor. - } - - /** - * Creates an instance corresponding to an unknown table and column family. - * Used in unit tests. - */ - public static SchemaConfigured createUnknown() { - return new SchemaConfigured(null, SchemaMetrics.UNKNOWN, - SchemaMetrics.UNKNOWN); - } - - /** - * Default constructor. Only use when column/family names are not known at - * construction (i.e. for HFile blocks). - */ - public SchemaConfigured() { - } - - /** - * Initialize table and column family name from an HFile path. If - * configuration is null, - * {@link SchemaMetrics#configureGlobally(Configuration)} should have been - * called already. - */ - public SchemaConfigured(Configuration conf, Path path) { - this(conf); - - if (path != null) { - String splits[] = path.toString().split("/"); - int numPathLevels = splits.length; - if (numPathLevels > 0 && splits[0].isEmpty()) { - // This path starts with a '/'. - --numPathLevels; - } - if (numPathLevels < HFile.MIN_NUM_HFILE_PATH_LEVELS) { - LOG.warn("Could not determine table and column family of the HFile " - + "path " + path + ". Expecting at least " - + HFile.MIN_NUM_HFILE_PATH_LEVELS + " path components."); - path = null; - } else { - cfName = splits[splits.length - 2]; - if (cfName.equals(HRegion.REGION_TEMP_SUBDIR)) { - // This is probably a compaction or flush output file. We will set - // the real CF name later.
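The path parsing above encodes the on-disk layout .../table/region/columnFamily/hfile: the column family is the second-to-last path component and the table the fourth-to-last. A runnable illustration (the example path and class name are made up):

    public class HFilePathSketch {
      public static void main(String[] args) {
        String path = "/hbase/t1/1028785192/f1/4958384593959";
        String[] splits = path.split("/");
        // splits = ["", "hbase", "t1", "1028785192", "f1", "4958384593959"]
        String cfName = splits[splits.length - 2];    // "f1"
        String tableName = splits[splits.length - 4]; // "t1"
        System.out.println("table=" + tableName + ", cf=" + cfName);
      }
    }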
- cfName = null; - } else { - cfName = cfName.intern(); - } - tableName = splits[splits.length - 4].intern(); - return; - } - } - - // This might also happen if we were passed an incorrect path. - cfName = SchemaMetrics.UNKNOWN; - tableName = SchemaMetrics.UNKNOWN; - } - - /** - * Used when we know an HFile path to deduce table and CF name from, but do - * not have a configuration. - * @param path an HFile path - */ - public SchemaConfigured(Path path) { - this(null, path); - } - - /** - * Used when we know table and column family name. If configuration is null, - * {@link SchemaMetrics#configureGlobally(Configuration)} should have been - * called already. - */ - public SchemaConfigured(Configuration conf, String tableName, String cfName) - { - this(conf); - if (tableName != null) { - this.tableName = tableName.intern(); - } - if (cfName != null) { - this.cfName = cfName.intern(); - } - } - - public SchemaConfigured(SchemaAware that) { - tableName = that.getTableName().intern(); - cfName = that.getColumnFamilyName().intern(); - schemaMetrics = that.getSchemaMetrics(); - } - - @Override - public String getTableName() { - return tableName; - } - - @Override - public String getColumnFamilyName() { - return cfName; - } - - @Override - public SchemaMetrics getSchemaMetrics() { - if (schemaMetrics == null) { - if (tableName == null || cfName == null) { - throw new IllegalStateException("Schema metrics requested before " + - "table/CF name initialization: " + schemaConfAsJSON()); - } - schemaMetrics = SchemaMetrics.getInstance(tableName, cfName); - } - return schemaMetrics; - } - - /** - * Configures the given object (e.g. an HFile block) with the current table - * and column family name, and the associated collection of metrics. Please - * note that this method configures the other object, not this - * object. - */ - public void passSchemaMetricsTo(SchemaConfigured target) { - if (isNull()) { - resetSchemaMetricsConf(target); - return; - } - - if (!isSchemaConfigured()) { - // Cannot configure another object if we are not configured ourselves. - throw new IllegalStateException("Table name/CF not initialized: " + - schemaConfAsJSON()); - } - - if (conflictingWith(target)) { - // Make sure we don't try to change table or CF name. - throw new IllegalArgumentException("Trying to change table name to \"" + - tableName + "\", CF name to \"" + cfName + "\" from " + - target.schemaConfAsJSON()); - } - - target.tableName = tableName.intern(); - target.cfName = cfName.intern(); - target.schemaMetrics = schemaMetrics; - target.schemaConfigurationChanged(); - } - - /** - * Reset schema metrics configuration in this particular instance. Used when - * we legitimately need to re-initialize the object with another table/CF. - * This is a static method because its use is discouraged and reserved for - * when it is really necessary (e.g. writing HFiles in a temp directory - * on compaction).
- */ - public static void resetSchemaMetricsConf(SchemaConfigured target) { - target.tableName = null; - target.cfName = null; - target.schemaMetrics = null; - target.schemaConfigurationChanged(); - } - - @Override - public long heapSize() { - return SCHEMA_CONFIGURED_ALIGNED_HEAP_SIZE; - } - - public String schemaConfAsJSON() { - return "{\"tableName\":\"" + tableName + "\",\"cfName\":\"" + cfName - + "\"}"; - } - - protected boolean isSchemaConfigured() { - return tableName != null && cfName != null; - } - - private boolean isNull() { - return tableName == null && cfName == null && schemaMetrics == null; - } - - /** - * Determines if the current object's table/CF settings are not in conflict - * with the other object's table and CF. If the other object's table/CF are - * undefined, they are not considered to be in conflict. Used to sanity-check - * configuring the other object with this object's table/CF. - */ - boolean conflictingWith(SchemaConfigured other) { - return (other.tableName != null && !tableName.equals(other.tableName)) || - (other.cfName != null && !cfName.equals(other.cfName)); - } - - /** - * A hook method called when schema configuration changes. Can be used to - * update schema-aware member fields. - */ - protected void schemaConfigurationChanged() { - } - -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java deleted file mode 100644 index 7a824ac2d40..00000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java +++ /dev/null @@ -1,926 +0,0 @@ -/* - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.hadoop.hbase.regionserver.metrics; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; -import java.util.TreeSet; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import org.apache.commons.lang.mutable.MutableDouble; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Pair; - -/** - * A collection of metric names in a given column family or a (table, column - * family) combination. The following "dimensions" are supported:
- * <ul>
- * <li>Table name (optional; enabled based on configuration)</li>
- * <li>Per-column family vs. aggregated. The aggregated mode is only supported
- * when table name is not included.</li>
- * <li>Block category (data, index, bloom filter, etc.)</li>
- * <li>Whether the request is part of a compaction</li>
- * <li>Metric type (read time, block read count, cache hits/misses, etc.)</li>
- * </ul>
- * An instance of this class does not store any metric values. It just allows - * one to determine the correct metric name for each combination of the above - * dimensions.
- * <table>
- * <tr><th rowspan="2">Metric key</th><th colspan="2">Per-table metrics conf setting</th><th rowspan="2">Description</th></tr>
- * <tr><th>On</th><th>Off</th></tr>
- * <tr><td>tbl.T.cf.CF.M</td><td>Include</td><td>Skip</td><td>A specific column family of a specific table</td></tr>
- * <tr><td>tbl.T.M</td><td>Skip</td><td>Skip</td><td>All column families in the given table</td></tr>
- * <tr><td>cf.CF.M</td><td>Skip</td><td>Include</td><td>A specific column family in all tables</td></tr>
- * <tr><td>M</td><td>Include</td><td>Include</td><td>All column families in all tables</td></tr>
- * </table>
    - */ -@InterfaceAudience.Private -public class SchemaMetrics { - - public interface SchemaAware { - public String getTableName(); - public String getColumnFamilyName(); - public SchemaMetrics getSchemaMetrics(); - } - - private static final Log LOG = LogFactory.getLog(SchemaMetrics.class); - - public static enum BlockMetricType { - // Metric configuration: compactionAware, timeVarying - READ_TIME("Read", true, true), - READ_COUNT("BlockReadCnt", true, false), - CACHE_HIT("BlockReadCacheHitCnt", true, false), - CACHE_MISS("BlockReadCacheMissCnt", true, false), - - CACHE_SIZE("blockCacheSize", false, false), - CACHED("blockCacheNumCached", false, false), - EVICTED("blockCacheNumEvicted", false, false); - - private final String metricStr; - private final boolean compactionAware; - private final boolean timeVarying; - - BlockMetricType(String metricStr, boolean compactionAware, - boolean timeVarying) { - this.metricStr = metricStr; - this.compactionAware = compactionAware; - this.timeVarying = timeVarying; - } - - @Override - public String toString() { - return metricStr; - } - - private static final String BLOCK_METRIC_TYPE_RE; - static { - StringBuilder sb = new StringBuilder(); - for (BlockMetricType bmt : values()) { - if (sb.length() > 0) - sb.append("|"); - sb.append(bmt); - } - BLOCK_METRIC_TYPE_RE = sb.toString(); - } - }; - - public static enum StoreMetricType { - STORE_FILE_COUNT("storeFileCount"), - STORE_FILE_INDEX_SIZE("storeFileIndexSizeMB"), - STORE_FILE_SIZE_MB("storeFileSizeMB"), - STATIC_BLOOM_SIZE_KB("staticBloomSizeKB"), - MEMSTORE_SIZE_MB("memstoreSizeMB"), - STATIC_INDEX_SIZE_KB("staticIndexSizeKB"), - FLUSH_SIZE("flushSize"); - - private final String metricStr; - - StoreMetricType(String metricStr) { - this.metricStr = metricStr; - } - - @Override - public String toString() { - return metricStr; - } - }; - - // Constants - /** - * A string used when column family or table name is unknown, and in some - * unit tests. This should not normally show up in metric names but if it - * does it is better than creating a silent discrepancy in total vs. - * per-CF/table metrics. - */ - public static final String UNKNOWN = "__unknown"; - - public static final String TABLE_PREFIX = "tbl."; - public static final String CF_PREFIX = "cf."; - public static final String BLOCK_TYPE_PREFIX = "bt."; - public static final String REGION_PREFIX = "region."; - - public static final String CF_UNKNOWN_PREFIX = CF_PREFIX + UNKNOWN + "."; - public static final String CF_BAD_FAMILY_PREFIX = CF_PREFIX + "__badfamily."; - - /** Use for readability when obtaining non-compaction counters */ - public static final boolean NO_COMPACTION = false; - - /** - * A special schema metric value that means "all tables aggregated" or - * "all column families aggregated" when used as a table name or a column - * family name. - */ - public static final String TOTAL_KEY = ""; - - /** - * Special handling for meta-block-specific metrics for - * backwards-compatibility. 
- */ - private static final String META_BLOCK_CATEGORY_STR = "Meta"; - - private static final int NUM_BLOCK_CATEGORIES = - BlockCategory.values().length; - - private static final int NUM_METRIC_TYPES = - BlockMetricType.values().length; - - static final boolean[] BOOL_VALUES = new boolean[] { false, true }; - - private static final int NUM_BLOCK_METRICS = - NUM_BLOCK_CATEGORIES * // blockCategory - BOOL_VALUES.length * // isCompaction - NUM_METRIC_TYPES; // metricType - - private static final int NUM_STORE_METRIC_TYPES = - StoreMetricType.values().length; - - /** Conf key controlling whether we include table name in metric names */ - private static final String SHOW_TABLE_NAME_CONF_KEY = - "hbase.metrics.showTableName"; - - /** We use this when too many column families are involved in a request. */ - private static final String MORE_CFS_OMITTED_STR = "__more"; - - /** - * Maximum length of a metric name prefix. Used when constructing metric - * names from a set of column families participating in a request. - */ - private static final int MAX_METRIC_PREFIX_LENGTH = - 256 - MORE_CFS_OMITTED_STR.length(); - - // Global variables - /** - * Maps a string key consisting of table name and column family name, with - * table name optionally replaced with {@link #TOTAL_KEY} if per-table - * metrics are disabled, to an instance of this class. - */ - private static final ConcurrentHashMap<String, SchemaMetrics> - tableAndFamilyToMetrics = new ConcurrentHashMap<String, SchemaMetrics>(); - - /** Metrics for all tables and column families. */ - // This has to be initialized after cfToMetrics. - public static final SchemaMetrics ALL_SCHEMA_METRICS = - getInstance(TOTAL_KEY, TOTAL_KEY); - - /** - * Whether to include table name in metric names. If this is null, it has not - * been initialized. This is a global instance, but we also have a copy of it - * per a {@link SchemaMetrics} object to avoid synchronization overhead. - */ - private static volatile Boolean useTableNameGlobally; - - /** Whether we logged a message about configuration inconsistency */ - private static volatile boolean loggedConfInconsistency; - - // Instance variables - private final String[] blockMetricNames = new String[NUM_BLOCK_METRICS]; - private final boolean[] blockMetricTimeVarying = - new boolean[NUM_BLOCK_METRICS]; - - private final String[] bloomMetricNames = new String[2]; - private final String[] storeMetricNames = new String[NUM_STORE_METRIC_TYPES]; - private final String[] storeMetricNamesMax = new String[NUM_STORE_METRIC_TYPES]; - - private SchemaMetrics(final String tableName, final String cfName) { - String metricPrefix = SchemaMetrics.generateSchemaMetricsPrefix( - tableName, cfName); - - for (BlockCategory blockCategory : BlockCategory.values()) { - for (boolean isCompaction : BOOL_VALUES) { - for (BlockMetricType metricType : BlockMetricType.values()) { - if (!metricType.compactionAware && isCompaction) { - continue; - } - - StringBuilder sb = new StringBuilder(metricPrefix); - if (blockCategory != BlockCategory.ALL_CATEGORIES - && blockCategory != BlockCategory.META) { - String categoryStr = blockCategory.toString(); - categoryStr = categoryStr.charAt(0) - + categoryStr.substring(1).toLowerCase(); - sb.append(BLOCK_TYPE_PREFIX + categoryStr + "."); - } - - if (metricType.compactionAware) { - sb.append(isCompaction ? "compaction" : "fs"); - } - - // A special-case for meta blocks for backwards-compatibility.
- if (blockCategory == BlockCategory.META) { - sb.append(META_BLOCK_CATEGORY_STR); - } - - sb.append(metricType); - - int i = getBlockMetricIndex(blockCategory, isCompaction, metricType); - blockMetricNames[i] = sb.toString().intern(); - blockMetricTimeVarying[i] = metricType.timeVarying; - } - } - } - - for (boolean isInBloom : BOOL_VALUES) { - bloomMetricNames[isInBloom ? 1 : 0] = metricPrefix - + (isInBloom ? "keyMaybeInBloomCnt" : "keyNotInBloomCnt"); - } - - for (StoreMetricType storeMetric : StoreMetricType.values()) { - String coreName = metricPrefix + storeMetric.toString(); - storeMetricNames[storeMetric.ordinal()] = coreName; - storeMetricNamesMax[storeMetric.ordinal()] = coreName + ".max"; - } - } - - - - public static final String METRIC_GETSIZE = "getsize"; - public static final String METRIC_NEXTSIZE = "nextsize"; - - /** - * Returns a {@link SchemaMetrics} object for the given table and column - * family, instantiating it if necessary. - * - * @param tableName table name (null is interpreted as "unknown"). This is - * ignored - * @param cfName column family name (null is interpreted as "unknown") - */ - public static SchemaMetrics getInstance(String tableName, String cfName) { - if (tableName == null) { - tableName = UNKNOWN; - } - - if (cfName == null) { - cfName = UNKNOWN; - } - - tableName = getEffectiveTableName(tableName); - - final String instanceKey = tableName + "\t" + cfName; - SchemaMetrics schemaMetrics = tableAndFamilyToMetrics.get(instanceKey); - if (schemaMetrics != null) { - return schemaMetrics; - } - - schemaMetrics = new SchemaMetrics(tableName, cfName); - SchemaMetrics existingMetrics = - tableAndFamilyToMetrics.putIfAbsent(instanceKey, schemaMetrics); - return existingMetrics != null ? existingMetrics : schemaMetrics; - } - - private static final int getBlockMetricIndex(BlockCategory blockCategory, - boolean isCompaction, BlockMetricType metricType) { - int i = 0; - i = i * NUM_BLOCK_CATEGORIES + blockCategory.ordinal(); - i = i * BOOL_VALUES.length + (isCompaction ? 1 : 0); - i = i * NUM_METRIC_TYPES + metricType.ordinal(); - return i; - } - - public String getBlockMetricName(BlockCategory blockCategory, - boolean isCompaction, BlockMetricType metricType) { - if (isCompaction && !metricType.compactionAware) { - throw new IllegalArgumentException("isCompaction cannot be true for " - + metricType); - } - return blockMetricNames[getBlockMetricIndex(blockCategory, isCompaction, - metricType)]; - } - - public String getBloomMetricName(boolean isInBloom) { - return bloomMetricNames[isInBloom ? 1 : 0]; - } - - /** - * Increments the given metric, both per-CF and aggregate, for both the given - * category and all categories in aggregate (four counters total). - */ - private void incrNumericMetric(BlockCategory blockCategory, - boolean isCompaction, BlockMetricType metricType) { - if (blockCategory == null) { - blockCategory = BlockCategory.UNKNOWN; // So that we see this in stats. 
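getBlockMetricIndex above packs the three dimensions into one flat array slot using mixed-radix arithmetic, the same trick as row-major array indexing. A self-contained sketch under assumed dimension sizes (the real code derives them from the enums, so the constants here are illustrative only):

    public class MetricIndexSketch {
      // Assumed sizes for illustration; SchemaMetrics uses
      // BlockCategory.values().length, 2, and BlockMetricType.values().length.
      static final int NUM_CATEGORIES = 7, NUM_BOOLS = 2, NUM_TYPES = 7;

      static int index(int category, boolean isCompaction, int type) {
        int i = 0;
        i = i * NUM_CATEGORIES + category;          // most significant digit
        i = i * NUM_BOOLS + (isCompaction ? 1 : 0); // middle digit
        i = i * NUM_TYPES + type;                   // least significant digit
        return i; // unique in [0, NUM_CATEGORIES * NUM_BOOLS * NUM_TYPES)
      }

      public static void main(String[] args) {
        System.out.println(index(1, true, 3)); // (1*2 + 1)*7 + 3 = 24
      }
    }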
- } - RegionMetricsStorage.incrNumericMetric(getBlockMetricName(blockCategory, - isCompaction, metricType), 1); - - if (blockCategory != BlockCategory.ALL_CATEGORIES) { - incrNumericMetric(BlockCategory.ALL_CATEGORIES, isCompaction, - metricType); - } - } - - private void addToReadTime(BlockCategory blockCategory, - boolean isCompaction, long timeMs) { - RegionMetricsStorage.incrTimeVaryingMetric(getBlockMetricName(blockCategory, - isCompaction, BlockMetricType.READ_TIME), timeMs); - - // Also update the read time aggregated across all block categories - if (blockCategory != BlockCategory.ALL_CATEGORIES) { - addToReadTime(BlockCategory.ALL_CATEGORIES, isCompaction, timeMs); - } - } - - /** - * Used to accumulate store metrics across multiple regions in a region - * server. These metrics are not "persistent", i.e. we keep overriding them - * on every update instead of incrementing, so we need to accumulate them in - * a temporary map before pushing them to the global metric collection. - * @param tmpMap a temporary map for accumulating store metrics - * @param storeMetricType the store metric type to increment - * @param val the value to add to the metric - */ - public void accumulateStoreMetric(final Map<String, MutableDouble> tmpMap, - StoreMetricType storeMetricType, double val) { - final String key = getStoreMetricName(storeMetricType); - if (tmpMap.get(key) == null) { - tmpMap.put(key, new MutableDouble(val)); - } else { - tmpMap.get(key).add(val); - } - - if (this == ALL_SCHEMA_METRICS) { - // also compute the max value across all Stores on this server - final String maxKey = getStoreMetricNameMax(storeMetricType); - MutableDouble cur = tmpMap.get(maxKey); - if (cur == null) { - tmpMap.put(maxKey, new MutableDouble(val)); - } else if (cur.doubleValue() < val) { - cur.setValue(val); - } - } else { - ALL_SCHEMA_METRICS.accumulateStoreMetric(tmpMap, storeMetricType, val); - } - } - - public String getStoreMetricName(StoreMetricType storeMetricType) { - return storeMetricNames[storeMetricType.ordinal()]; - } - - public String getStoreMetricNameMax(StoreMetricType storeMetricType) { - return storeMetricNamesMax[storeMetricType.ordinal()]; - } - - /** - * Update a metric that does not get reset on every poll. - * @param storeMetricType the store metric to update - * @param value the value to update the metric to - */ - public void updatePersistentStoreMetric(StoreMetricType storeMetricType, - long value) { - RegionMetricsStorage.incrNumericPersistentMetric( - storeMetricNames[storeMetricType.ordinal()], value); - } - - /** - * Updates the number of hits and the total number of block reads on a block - * cache hit. - */ - public void updateOnCacheHit(BlockCategory blockCategory, - boolean isCompaction) { - blockCategory.expectSpecific(); - incrNumericMetric(blockCategory, isCompaction, BlockMetricType.CACHE_HIT); - incrNumericMetric(blockCategory, isCompaction, BlockMetricType.READ_COUNT); - if (this != ALL_SCHEMA_METRICS) { - ALL_SCHEMA_METRICS.updateOnCacheHit(blockCategory, isCompaction); - } - } - - /** - * Updates read time, the number of misses, and the total number of block - * reads on a block cache miss.
- */ - public void updateOnCacheMiss(BlockCategory blockCategory, - boolean isCompaction, long timeMs) { - blockCategory.expectSpecific(); - addToReadTime(blockCategory, isCompaction, timeMs); - incrNumericMetric(blockCategory, isCompaction, BlockMetricType.CACHE_MISS); - incrNumericMetric(blockCategory, isCompaction, BlockMetricType.READ_COUNT); - if (this != ALL_SCHEMA_METRICS) { - ALL_SCHEMA_METRICS.updateOnCacheMiss(blockCategory, isCompaction, - timeMs); - } - } - - /** - * Adds the given delta to the cache size for the given block category and - * the aggregate metric for all block categories. Updates both the per-CF - * counter and the counter for all CFs (four metrics total). The cache size - * metric is "persistent", i.e. it does not get reset when metrics are - * collected. - */ - public void addToCacheSize(BlockCategory category, long cacheSizeDelta) { - if (category == null) { - category = BlockCategory.ALL_CATEGORIES; - } - RegionMetricsStorage.incrNumericPersistentMetric(getBlockMetricName(category, false, - BlockMetricType.CACHE_SIZE), cacheSizeDelta); - - if (category != BlockCategory.ALL_CATEGORIES) { - addToCacheSize(BlockCategory.ALL_CATEGORIES, cacheSizeDelta); - } - } - - public void updateOnCachePutOrEvict(BlockCategory blockCategory, - long cacheSizeDelta, boolean isEviction) { - addToCacheSize(blockCategory, cacheSizeDelta); - incrNumericMetric(blockCategory, false, - isEviction ? BlockMetricType.EVICTED : BlockMetricType.CACHED); - if (this != ALL_SCHEMA_METRICS) { - ALL_SCHEMA_METRICS.updateOnCachePutOrEvict(blockCategory, cacheSizeDelta, - isEviction); - } - } - - /** - * Increments both the per-CF and the aggregate counter of bloom - * positives/negatives as specified by the argument. - */ - public void updateBloomMetrics(boolean isInBloom) { - RegionMetricsStorage.incrNumericMetric(getBloomMetricName(isInBloom), 1); - if (this != ALL_SCHEMA_METRICS) { - ALL_SCHEMA_METRICS.updateBloomMetrics(isInBloom); - } - } - - /** - * Sets the flag whether to use table name in metric names according to the - * given configuration. This must be called at least once before - * instantiating HFile readers/writers. - */ - public static void configureGlobally(Configuration conf) { - if (conf != null) { - final boolean useTableNameNew = - conf.getBoolean(SHOW_TABLE_NAME_CONF_KEY, false); - setUseTableName(useTableNameNew); - } else { - setUseTableName(false); - } - } - - /** - * Determine the table name to be included in metric keys. If the global - * configuration says that we should not use table names in metrics, - * we always return {@link #TOTAL_KEY} even if nontrivial table name is - * provided. - * - * @param tableName a table name or {@link #TOTAL_KEY} when aggregating - * across all tables - * @return the table name to use in metric keys - */ - private static String getEffectiveTableName(String tableName) { - if (!tableName.equals(TOTAL_KEY)) { - // We are provided with a non-trivial table name (including "unknown"). - // We need to know whether table name should be included into metrics. - if (useTableNameGlobally == null) { - throw new IllegalStateException("The value of the " - + SHOW_TABLE_NAME_CONF_KEY + " conf option has not been specified " - + "in SchemaMetrics"); - } - final boolean useTableName = useTableNameGlobally; - if (!useTableName) { - // Don't include table name in metric keys. - tableName = TOTAL_KEY; - } - } - return tableName; - } - - /** - * Method to transform a combination of a table name and a column family name - * into a metric key prefix. 
Tables/column family names equal to - * {@link #TOTAL_KEY} are omitted from the prefix. - * - * @param tableName the table name or {@link #TOTAL_KEY} for all tables - * @param cfName the column family name or {@link #TOTAL_KEY} for all CFs - * @return the metric name prefix, ending with a dot. - */ - public static String generateSchemaMetricsPrefix(String tableName, - final String cfName) { - tableName = getEffectiveTableName(tableName); - String schemaMetricPrefix = - tableName.equals(TOTAL_KEY) ? "" : TABLE_PREFIX + tableName + "."; - schemaMetricPrefix += - cfName.equals(TOTAL_KEY) ? "" : CF_PREFIX + cfName + "."; - return schemaMetricPrefix; - } - - public static String generateSchemaMetricsPrefix(byte[] tableName, - byte[] cfName) { - return generateSchemaMetricsPrefix(Bytes.toString(tableName), - Bytes.toString(cfName)); - } - - /** - * Method to transform a set of column families in byte[] format with table - * name into a metric key prefix. - * - * @param tableName the table name or {@link #TOTAL_KEY} for all tables - * @param families the ordered set of column families - * @return the metric name prefix, ending with a dot, or an empty string in - * case of invalid arguments. This is OK since we always expect - * some CFs to be included. - */ - public static String generateSchemaMetricsPrefix(String tableName, - Set<byte[]> families) { - if (families == null || families.isEmpty() || - tableName == null || tableName.isEmpty()) { - return ""; - } - - if (families.size() == 1) { - return generateSchemaMetricsPrefix(tableName, - Bytes.toString(families.iterator().next())); - } - - tableName = getEffectiveTableName(tableName); - List<byte[]> sortedFamilies = new ArrayList<byte[]>(families); - Collections.sort(sortedFamilies, Bytes.BYTES_COMPARATOR); - - StringBuilder sb = new StringBuilder(); - - int numCFsLeft = families.size(); - for (byte[] family : sortedFamilies) { - if (sb.length() > MAX_METRIC_PREFIX_LENGTH) { - sb.append(MORE_CFS_OMITTED_STR); - break; - } - --numCFsLeft; - sb.append(Bytes.toString(family)); - if (numCFsLeft > 0) { - sb.append("~"); - } - } - - return SchemaMetrics.generateSchemaMetricsPrefix(tableName, sb.toString()); - } - - - /** - * Get the prefix for metrics generated about a single region. - * @param tableName the table name or {@link #TOTAL_KEY} for all tables - * @param regionName regionName - * @return the prefix for this table/region combination. - */ - static String generateRegionMetricsPrefix(String tableName, String regionName) { - tableName = getEffectiveTableName(tableName); - String schemaMetricPrefix = - tableName.equals(TOTAL_KEY) ? "" : TABLE_PREFIX + tableName + "."; - schemaMetricPrefix += - regionName.equals(TOTAL_KEY) ? "" : REGION_PREFIX + regionName + "."; - - return schemaMetricPrefix; - } - - /** - * Sets the flag of whether to use table name in metric names. This flag - * is specified in configuration and is not expected to change at runtime, - * so we log an error message when it does change. - */ - private static void setUseTableName(final boolean useTableNameNew) { - if (useTableNameGlobally == null) { - // This configuration option has not yet been set. - useTableNameGlobally = useTableNameNew; - } else if (useTableNameGlobally != useTableNameNew - && !loggedConfInconsistency) { - // The configuration is inconsistent and we have not reported it - // previously. Once we report it, just keep ignoring the new setting. - LOG.error("Inconsistent configuration.
Previous configuration " - + "for using table name in metrics: " + useTableNameGlobally + ", " - + "new configuration: " + useTableNameNew); - loggedConfInconsistency = true; - } - } - - // Methods used in testing - - private static final String regexEscape(String s) { - return s.replace(".", "\\."); - } - - /** - * Assume that table names used in tests don't contain dots, except for the - * META table. - */ - private static final String WORD_AND_DOT_RE_STR = "([^.]+|" + - regexEscape(Bytes.toString(HConstants.META_TABLE_NAME)) + - ")\\."; - - /** "tbl.<table_name>." */ - private static final String TABLE_NAME_RE_STR = - "\\b" + regexEscape(TABLE_PREFIX) + WORD_AND_DOT_RE_STR; - - /** "cf.<cf_name>." */ - private static final String CF_NAME_RE_STR = - "\\b" + regexEscape(CF_PREFIX) + WORD_AND_DOT_RE_STR; - private static final Pattern CF_NAME_RE = Pattern.compile(CF_NAME_RE_STR); - - /** "tbl.<table_name>.cf.<cf_name>." */ - private static final Pattern TABLE_AND_CF_NAME_RE = Pattern.compile( - TABLE_NAME_RE_STR + CF_NAME_RE_STR); - - private static final Pattern BLOCK_CATEGORY_RE = Pattern.compile( - "\\b" + regexEscape(BLOCK_TYPE_PREFIX) + "[^.]+\\." + - // Also remove the special-case block type marker for meta blocks - "|" + META_BLOCK_CATEGORY_STR + "(?=" + - BlockMetricType.BLOCK_METRIC_TYPE_RE + ")"); - - /** - * A suffix for the "number of operations" part of "time-varying metrics". We - * only use this for metric verification in unit testing. Time-varying - * metrics are handled by a different code path in production. - */ - private static String NUM_OPS_SUFFIX = "numops"; - - /** - * A custom suffix that we use for verifying the second component of - * a "time-varying metric". - */ - private static String TOTAL_SUFFIX = "_total"; - private static final Pattern TIME_VARYING_SUFFIX_RE = Pattern.compile( - "(" + NUM_OPS_SUFFIX + "|" + TOTAL_SUFFIX + ")$"); - - void printMetricNames() { - for (BlockCategory blockCategory : BlockCategory.values()) { - for (boolean isCompaction : BOOL_VALUES) { - for (BlockMetricType metricType : BlockMetricType.values()) { - int i = getBlockMetricIndex(blockCategory, isCompaction, metricType); - LOG.debug("blockCategory=" + blockCategory + ", " - + "metricType=" + metricType + ", isCompaction=" + isCompaction - + ", metricName=" + blockMetricNames[i]); - } - } - } - } - - private Collection<String> getAllMetricNames() { - List<String> allMetricNames = new ArrayList<String>(); - for (int i = 0; i < blockMetricNames.length; ++i) { - final String blockMetricName = blockMetricNames[i]; - final boolean timeVarying = blockMetricTimeVarying[i]; - if (blockMetricName != null) { - if (timeVarying) { - allMetricNames.add(blockMetricName + NUM_OPS_SUFFIX); - allMetricNames.add(blockMetricName + TOTAL_SUFFIX); - } else { - allMetricNames.add(blockMetricName); - } - } - } - allMetricNames.addAll(Arrays.asList(bloomMetricNames)); - return allMetricNames; - } - - private static final boolean isTimeVaryingKey(String metricKey) { - return metricKey.endsWith(NUM_OPS_SUFFIX) - || metricKey.endsWith(TOTAL_SUFFIX); - } - - private static final String stripTimeVaryingSuffix(String metricKey) { - return TIME_VARYING_SUFFIX_RE.matcher(metricKey).replaceAll(""); - } - - public static Map<String, Long> getMetricsSnapshot() { - Map<String, Long> metricsSnapshot = new TreeMap<String, Long>(); - for (SchemaMetrics cfm : tableAndFamilyToMetrics.values()) { - for (String metricName : cfm.getAllMetricNames()) { - long metricValue; - if (isTimeVaryingKey(metricName)) { - Pair<Long, Long> totalAndCount = - RegionMetricsStorage.getTimeVaryingMetric(stripTimeVaryingSuffix(metricName)); - metricValue =
metricName.endsWith(TOTAL_SUFFIX) ? - totalAndCount.getFirst() : totalAndCount.getSecond(); - } else { - metricValue = RegionMetricsStorage.getNumericMetric(metricName); - } - - metricsSnapshot.put(metricName, metricValue); - } - } - return metricsSnapshot; - } - - public static long getLong(Map<String, Long> m, String k) { - Long l = m.get(k); - return l != null ? l : 0; - } - - private static void putLong(Map<String, Long> m, String k, long v) { - if (v != 0) { - m.put(k, v); - } else { - m.remove(k); - } - } - - /** - * @return the difference between two sets of metrics (second minus first). - * Only includes keys that have nonzero difference. - */ - public static Map<String, Long> diffMetrics(Map<String, Long> a, - Map<String, Long> b) { - Set<String> allKeys = new TreeSet<String>(a.keySet()); - allKeys.addAll(b.keySet()); - Map<String, Long> diff = new TreeMap<String, Long>(); - for (String k : allKeys) { - long aVal = getLong(a, k); - long bVal = getLong(b, k); - if (aVal != bVal) { - diff.put(k, bVal - aVal); - } - } - return diff; - } - - public static void validateMetricChanges(Map<String, Long> oldMetrics) { - final Map<String, Long> newMetrics = getMetricsSnapshot(); - final Map<String, Long> allCfDeltas = new TreeMap<String, Long>(); - final Map<String, Long> allBlockCategoryDeltas = - new TreeMap<String, Long>(); - final Map<String, Long> deltas = diffMetrics(oldMetrics, newMetrics); - final Pattern cfTableMetricRE = - useTableNameGlobally ? TABLE_AND_CF_NAME_RE : CF_NAME_RE; - final Set<String> allKeys = new TreeSet<String>(oldMetrics.keySet()); - allKeys.addAll(newMetrics.keySet()); - - for (SchemaMetrics cfm : tableAndFamilyToMetrics.values()) { - for (String metricName : cfm.getAllMetricNames()) { - if (metricName.startsWith(CF_PREFIX + CF_PREFIX)) { - throw new AssertionError("Column family prefix used twice: " + - metricName); - } - - final long oldValue = getLong(oldMetrics, metricName); - final long newValue = getLong(newMetrics, metricName); - final long delta = newValue - oldValue; - - // Re-calculate values of metrics with no column family (or CF/table) - // specified based on all metrics with CF (or CF/table) specified. - if (delta != 0) { - if (cfm != ALL_SCHEMA_METRICS) { - final String aggregateMetricName = - cfTableMetricRE.matcher(metricName).replaceAll(""); - if (!aggregateMetricName.equals(metricName)) { - LOG.debug("Counting " + delta + " units of " + metricName - + " towards " + aggregateMetricName); - - putLong(allCfDeltas, aggregateMetricName, - getLong(allCfDeltas, aggregateMetricName) + delta); - } - } else { - LOG.debug("Metric=" + metricName + ", delta=" + delta); - } - } - - Matcher matcher = BLOCK_CATEGORY_RE.matcher(metricName); - if (matcher.find()) { - // Only process per-block-category metrics - String metricNoBlockCategory = matcher.replaceAll(""); - - putLong(allBlockCategoryDeltas, metricNoBlockCategory, - getLong(allBlockCategoryDeltas, metricNoBlockCategory) + delta); - } - } - } - - StringBuilder errors = new StringBuilder(); - for (String key : ALL_SCHEMA_METRICS.getAllMetricNames()) { - long actual = getLong(deltas, key); - long expected = getLong(allCfDeltas, key); - if (actual != expected) { - if (errors.length() > 0) - errors.append("\n"); - errors.append("The all-CF metric " + key + " changed by " - + actual + " but the aggregation of per-CF/table metrics " - + "yields " + expected); - } - } - - // Verify metrics computed for all block types based on the aggregation - // of per-block-type metrics. - for (String key : allKeys) { - if (BLOCK_CATEGORY_RE.matcher(key).find() || - key.contains(ALL_SCHEMA_METRICS.getBloomMetricName(false)) || - key.contains(ALL_SCHEMA_METRICS.getBloomMetricName(true))){ - // Skip per-block-category metrics.
Also skip bloom filters, because - // they are not aggregated per block type. - continue; - } - long actual = getLong(deltas, key); - long expected = getLong(allBlockCategoryDeltas, key); - if (actual != expected) { - if (errors.length() > 0) - errors.append("\n"); - errors.append("The all-block-category metric " + key - + " changed by " + actual + " but the aggregation of " - + "per-block-category metrics yields " + expected); - } - } - - if (errors.length() > 0) { - throw new AssertionError(errors.toString()); - } - } - - /** - * Creates an instance pretending both the table and column family are - * unknown. Used in unit tests. - */ - public static SchemaMetrics getUnknownInstanceForTest() { - return getInstance(UNKNOWN, UNKNOWN); - } - - /** - * Set the flag to use or not use table name in metric names. Used in unit - * tests, so the flag can be set arbitrarily. - */ - public static void setUseTableNameInTest(final boolean useTableNameNew) { - useTableNameGlobally = useTableNameNew; - } - - /** Formats the given map of metrics in a human-readable way. */ - public static String formatMetrics(Map<String, Long> metrics) { - StringBuilder sb = new StringBuilder(); - for (Map.Entry<String, Long> entry : metrics.entrySet()) { - if (sb.length() > 0) { - sb.append('\n'); - } - sb.append(entry.getKey() + " : " + entry.getValue()); - } - return sb.toString(); - } - -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSinkMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java similarity index 85% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSinkMetrics.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java index 1b57aee0062..676fe25066e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSinkMetrics.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java @@ -16,26 +16,27 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.replication.regionserver.metrics; +package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSource; /** * This class is for maintaining the various replication statistics for a sink and publishing them * through the metrics interfaces.
*/ @InterfaceAudience.Private -public class ReplicationSinkMetrics { +public class MetricsSink { public static final String SINK_AGE_OF_LAST_APPLIED_OP = "sink.ageOfLastAppliedOp"; public static final String SINK_APPLIED_BATCHES = "sink.appliedBatches"; public static final String SINK_APPLIED_OPS = "sink.appliedOps"; - private ReplicationMetricsSource rms; + private MetricsReplicationSource rms; - public ReplicationSinkMetrics() { - rms = CompatibilitySingletonFactory.getInstance(ReplicationMetricsSource.class); + public MetricsSink() { + rms = CompatibilitySingletonFactory.getInstance(MetricsReplicationSource.class); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java similarity index 88% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetrics.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java index fe24d396ba3..e632c473829 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetrics.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java @@ -16,19 +16,20 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.replication.regionserver.metrics; +package org.apache.hadoop.hbase.replication.regionserver; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** * This class is for maintaining the various replication statistics for a source and publishing them * through the metrics interfaces. */ @InterfaceAudience.Private -public class ReplicationSourceMetrics { +public class MetricsSource { public static final String SOURCE_SIZE_OF_LOG_QUEUE = "source.sizeOfLogQueue"; public static final String SOURCE_AGE_OF_LAST_SHIPPED_OP = "source.ageOfLastShippedOp"; @@ -37,7 +38,7 @@ public class ReplicationSourceMetrics { public static final String SOURCE_SHIPPED_BATCHES = "source.shippedBatches"; public static final String SOURCE_SHIPPED_OPS = "source.shippedOps"; - public static final Log LOG = LogFactory.getLog(ReplicationSourceMetrics.class); + public static final Log LOG = LogFactory.getLog(MetricsSource.class); private String id; private long lastTimestamp = 0; @@ -50,14 +51,14 @@ public class ReplicationSourceMetrics { private final String shippedBatchesKey; private final String shippedOpsKey; - private ReplicationMetricsSource rms; + private MetricsReplicationSource rms; /** * Constructor used to register the metrics * * @param id Name of the source this class is monitoring */ - public ReplicationSourceMetrics(String id) { + public MetricsSource(String id) { this.id = id; sizeOfLogQueKey = "source." + id + ".sizeOfLogQueue"; @@ -66,7 +67,7 @@ public class ReplicationSourceMetrics { logEditsFilteredKey = "source." + id + ".logEditsFiltered"; shippedBatchesKey = "source." + this.id + ".shippedBatches"; shippedOpsKey = "source." 
+ this.id + ".shippedOps"; - rms = CompatibilitySingletonFactory.getInstance(ReplicationMetricsSource.class); + rms = CompatibilitySingletonFactory.getInstance(MetricsReplicationSource.class); } /** @@ -75,7 +76,7 @@ public class ReplicationSourceMetrics { * @param timestamp write time of the edit */ public void setAgeOfLastShippedOp(long timestamp) { - long age = System.currentTimeMillis() - timestamp; + long age = EnvironmentEdgeManager.currentTimeMillis() - timestamp; rms.setGauge(ageOfLastShippedOpKey, age); rms.setGauge(SOURCE_AGE_OF_LAST_SHIPPED_OP, age); this.lastTimestamp = timestamp; @@ -146,13 +147,13 @@ public class ReplicationSourceMetrics { /** Removes all metrics about this Source. */ public void clear() { - rms.removeGauge(sizeOfLogQueKey); + rms.removeMetric(sizeOfLogQueKey); rms.decGauge(SOURCE_SIZE_OF_LOG_QUEUE, lastQueueSize); lastQueueSize = 0; - rms.removeGauge(ageOfLastShippedOpKey); + rms.removeMetric(ageOfLastShippedOpKey); - rms.removeCounter(logEditsFilteredKey); - rms.removeCounter(logEditsReadKey); + rms.removeMetric(logEditsFilteredKey); + rms.removeMetric(logEditsReadKey); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java index b5ebe74c629..34ba0204e80 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java @@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; -import org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationSinkMetrics; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; @@ -73,7 +72,7 @@ public class ReplicationSink { private final Configuration conf; private final ExecutorService sharedThreadPool; private final HConnection sharedHtableCon; - private final ReplicationSinkMetrics metrics; + private final MetricsSink metrics; /** * Create a sink for replication @@ -86,7 +85,7 @@ public class ReplicationSink { throws IOException { this.conf = HBaseConfiguration.create(conf); decorateConf(); - this.metrics = new ReplicationSinkMetrics(); + this.metrics = new MetricsSink(); this.sharedHtableCon = HConnectionManager.createConnection(this.conf); this.sharedThreadPool = new ThreadPoolExecutor(1, conf.getInt("hbase.htable.threads.max", Integer.MAX_VALUE), diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index f18b198a3d6..1312f7a4e81 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -56,12 +56,8 @@ import org.apache.hadoop.hbase.regionserver.wal.HLogFactory; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.replication.ReplicationZookeeper; -import org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationSourceMetrics; -import org.apache.hadoop.hbase.regionserver.wal.HLog; -import 
org.apache.hadoop.hbase.regionserver.wal.HLogUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.hadoop.ipc.RemoteException; import org.apache.zookeeper.KeeperException; @@ -141,7 +137,7 @@ public class ReplicationSource extends Thread // Indicates if this particular source is running private volatile boolean running = true; // Metrics for this source - private ReplicationSourceMetrics metrics; + private MetricsSource metrics; /** * Instantiation method used by region servers @@ -188,7 +184,7 @@ public class ReplicationSource extends Thread this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000); this.fs = fs; - this.metrics = new ReplicationSourceMetrics(peerClusterZnode); + this.metrics = new MetricsSource(peerClusterZnode); try { this.clusterId = zkHelper.getUUIDForCluster(zkHelper.getZookeeperWatcher()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java similarity index 78% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetrics.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java index b66881545fa..98f48a70180 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetrics.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java @@ -17,30 +17,24 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.rest.metrics; +package org.apache.hadoop.hbase.rest; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.metrics.MetricsRate; -import org.apache.hadoop.metrics.MetricsContext; -import org.apache.hadoop.metrics.MetricsRecord; -import org.apache.hadoop.metrics.MetricsUtil; -import org.apache.hadoop.metrics.Updater; -import org.apache.hadoop.metrics.jvm.JvmMetrics; -import org.apache.hadoop.metrics.util.MetricsRegistry; +import org.apache.hadoop.hbase.rest.MetricsRESTSource; @InterfaceAudience.Private -public class RESTMetrics { +public class MetricsREST { - public RESTMetricsSource getSource() { + public MetricsRESTSource getSource() { return source; } - private RESTMetricsSource source; + private MetricsRESTSource source; - public RESTMetrics() { - source = CompatibilitySingletonFactory.getInstance(RESTMetricsSource.class); + public MetricsREST() { + source = CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java index 5cd1e383ddf..00309551c1e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java @@ -24,7 +24,6 @@ import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.HTablePool; -import org.apache.hadoop.hbase.rest.metrics.RESTMetrics; /** * Singleton class encapsulating global REST servlet state and functions. 
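Every metrics front-end touched by this patch (MetricsSink and MetricsSource above, MetricsREST here, ThriftMetrics below) follows the same shape: resolve the hadoop1/hadoop2-specific source once through CompatibilitySingletonFactory and delegate all updates to it. A minimal sketch of that shape, with MetricsFoo and MetricsFooSource as hypothetical placeholder names (the real types are the Metrics*Source interfaces being renamed in this patch):

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;

@InterfaceAudience.Private
public class MetricsFoo {

  // Implementation supplied by whichever compat module (hadoop1 or hadoop2)
  // is on the classpath; the factory caches one instance per interface.
  // MetricsFooSource is a hypothetical stand-in for a real Metrics*Source type.
  private final MetricsFooSource source;

  public MetricsFoo() {
    source = CompatibilitySingletonFactory.getInstance(MetricsFooSource.class);
  }

  public MetricsFooSource getSource() {
    return source;
  }

  // Updates go through the version-neutral interface; setGauge is the same
  // call MetricsSource uses above. The gauge name here is illustrative only.
  public void updateFoo(long value) {
    source.setGauge("foo.lastValue", value);
  }
}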
@@ -34,7 +33,7 @@ public class RESTServlet implements Constants { private static RESTServlet INSTANCE; private final Configuration conf; private final HTablePool pool; - private final RESTMetrics metrics = new RESTMetrics(); + private final MetricsREST metrics = new MetricsREST(); /** * @return the RESTServlet singleton instance @@ -80,7 +79,7 @@ public class RESTServlet implements Constants { return conf; } - RESTMetrics getMetrics() { + MetricsREST getMetrics() { return metrics; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java index 1568b7edcd9..bc600a36397 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java @@ -22,8 +22,6 @@ package org.apache.hadoop.hbase.thrift; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; -import org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSource; -import org.apache.hadoop.hbase.thrift.metrics.ThriftServerMetricsSourceFactory; /** * This class is for maintaining the various statistics of thrift server @@ -38,15 +36,15 @@ public class ThriftMetrics { TWO } - public ThriftServerMetricsSource getSource() { + public MetricsThriftServerSource getSource() { return source; } - public void setSource(ThriftServerMetricsSource source) { + public void setSource(MetricsThriftServerSource source) { this.source = source; } - private ThriftServerMetricsSource source; + private MetricsThriftServerSource source; private final long slowResponseTime; public static final String SLOW_RESPONSE_NANO_SEC = "hbase.thrift.slow.response.nano.second"; @@ -57,9 +55,9 @@ public class ThriftMetrics { slowResponseTime = conf.getLong( SLOW_RESPONSE_NANO_SEC, DEFAULT_SLOW_RESPONSE_NANO_SEC); if (t == ThriftServerType.ONE) { - source = CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class).createThriftOneSource(); + source = CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class).createThriftOneSource(); } else if (t == ThriftServerType.TWO) { - source = CompatibilitySingletonFactory.getInstance(ThriftServerMetricsSourceFactory.class).createThriftTwoSource(); + source = CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class).createThriftTwoSource(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java index b08f8f23294..9e94004ec8e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java @@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.io.hfile.LruBlockCache; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.MemStore; import org.apache.hadoop.hbase.regionserver.HStore; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; import org.junit.experimental.categories.Category; @@ -313,12 +312,6 @@ public class TestHeapSize extends TestCase { assertEquals(expected, actual); } - // SchemaConfigured - LOG.debug("Heap size for: " + SchemaConfigured.class.getName()); - SchemaConfigured sc = new 
SchemaConfigured(null, "myTable", "myCF"); - assertEquals(ClassSize.estimateBase(SchemaConfigured.class, true), - sc.heapSize()); - // Store Overhead cl = HStore.class; actual = HStore.FIXED_OVERHEAD; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java index c73c49147c0..6eb26baecaf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.MultithreadedTestUtil; import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.util.ChecksumType; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; public class CacheTestUtils { @@ -281,17 +280,6 @@ public class CacheTestUtils { } }; } - - @Override - public BlockType getBlockType() { - return BlockType.DATA; - } - - @Override - public SchemaMetrics getSchemaMetrics() { - return SchemaMetrics.getUnknownInstanceForTest(); - } - } private static HFileBlockPair[] generateHFileBlocks(int blockSize, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java index da8ffd7fbd4..d4995deb386 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java @@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; import org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; @@ -361,13 +360,9 @@ public class TestCacheOnWrite { (LruBlockCache) new CacheConfig(conf).getBlockCache(); blockCache.clearCache(); assertEquals(0, blockCache.getBlockTypeCountsForTest().size()); - Map metricsBefore = SchemaMetrics.getMetricsSnapshot(); region.compactStores(); LOG.debug("compactStores() returned"); - SchemaMetrics.validateMetricChanges(metricsBefore); - Map compactionMetrics = SchemaMetrics.diffMetrics( - metricsBefore, SchemaMetrics.getMetricsSnapshot()); - LOG.debug(SchemaMetrics.formatMetrics(compactionMetrics)); + Map blockTypesInCache = blockCache.getBlockTypeCountsForTest(); LOG.debug("Block types in cache: " + blockTypesInCache); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java index 7a712b4a26e..d8e09fc3dbf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java @@ -20,8 +20,6 @@ package org.apache.hadoop.hbase.io.hfile; import java.nio.ByteBuffer; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; - import junit.framework.TestCase; import org.apache.hadoop.hbase.SmallTests; import org.junit.experimental.categories.Category; @@ -137,15 +135,6 @@ public class TestCachedBlockQueue extends TestCase { return null; } - 
@Override - public BlockType getBlockType() { - return BlockType.DATA; - } - - @Override - public SchemaMetrics getSchemaMetrics() { - return SchemaMetrics.ALL_SCHEMA_METRICS; - } }, accessTime, false); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java index 258ded57428..59c48451003 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java @@ -36,8 +36,6 @@ import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.BlockMetricType; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -103,7 +101,7 @@ public class TestForceCacheImportantBlocks { TEST_UTIL.getConfiguration().setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, BLOCK_SIZE); - SchemaMetrics.setUseTableNameInTest(false); + HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(CF)) .setMaxVersions(MAX_VERSIONS) @@ -113,16 +111,12 @@ public class TestForceCacheImportantBlocks { hcd.setBlockCacheEnabled(cfCacheEnabled); HRegion region = TEST_UTIL.createTestRegion(TABLE, hcd); writeTestData(region); - Map metricsBefore = SchemaMetrics.getMetricsSnapshot(); + for (int i = 0; i < NUM_ROWS; ++i) { Get get = new Get(Bytes.toBytes("row" + i)); region.get(get, null); } - SchemaMetrics.validateMetricChanges(metricsBefore); - Map metricsAfter = SchemaMetrics.getMetricsSnapshot(); - Map metricsDelta = SchemaMetrics.diffMetrics(metricsBefore, - metricsAfter); - SchemaMetrics metrics = SchemaMetrics.getInstance(TABLE, CF); + List importantBlockCategories = new ArrayList(); importantBlockCategories.add(BlockCategory.BLOOM); @@ -130,30 +124,8 @@ public class TestForceCacheImportantBlocks { // We only have index blocks for HFile v2. importantBlockCategories.add(BlockCategory.INDEX); } - - for (BlockCategory category : importantBlockCategories) { - String hitsMetricName = getMetricName(metrics, category); - assertTrue("Metric " + hitsMetricName + " was not incremented", - metricsDelta.containsKey(hitsMetricName)); - long hits = metricsDelta.get(hitsMetricName); - assertTrue("Invalid value of " + hitsMetricName + ": " + hits, hits > 0); - } - - if (!cfCacheEnabled) { - // Caching is turned off for the CF, so make sure we are not caching data - // blocks. 
- String dataHitMetricName = getMetricName(metrics, BlockCategory.DATA); - assertFalse("Nonzero value for metric " + dataHitMetricName, - metricsDelta.containsKey(dataHitMetricName)); - } } - private String getMetricName(SchemaMetrics metrics, BlockCategory category) { - String hitsMetricName = - metrics.getBlockMetricName(category, SchemaMetrics.NO_COMPACTION, - BlockMetricType.CACHE_HIT); - return hitsMetricName; - } private void writeTestData(HRegion region) throws IOException { for (int i = 0; i < NUM_ROWS; ++i) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java index 6d6ba796cb5..141ef10ca39 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java @@ -34,8 +34,6 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; import org.apache.hadoop.hbase.io.encoding.RedundantKVGenerator; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; import org.apache.hadoop.hbase.util.ChecksumType; import org.junit.After; import org.junit.Before; @@ -53,8 +51,6 @@ public class TestHFileDataBlockEncoder { new HBaseTestingUtility(); private HFileDataBlockEncoderImpl blockEncoder; private RedundantKVGenerator generator = new RedundantKVGenerator(); - private SchemaConfigured UNKNOWN_TABLE_AND_CF = - SchemaConfigured.createUnknown(); private boolean includesMemstoreTS; /** @@ -76,7 +72,6 @@ public class TestHFileDataBlockEncoder { @Before public void setUp() { conf = TEST_UTIL.getConfiguration(); - SchemaMetrics.configureGlobally(conf); } /** @@ -162,7 +157,6 @@ public class TestHFileDataBlockEncoder { HFileBlock b = new HFileBlock(BlockType.DATA, size, size, -1, buf, HFileBlock.FILL_HEADER, 0, includesMemstoreTS, HFileReaderV2.MAX_MINOR_VERSION, 0, ChecksumType.NULL.getCode(), 0); - UNKNOWN_TABLE_AND_CF.passSchemaMetricsTo(b); return b; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderV1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderV1.java index f91c83e394f..a36fb546bb7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderV1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderV1.java @@ -29,7 +29,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.io.compress.Compression; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; import org.apache.hadoop.hbase.util.Bytes; import org.junit.After; @@ -47,21 +46,13 @@ public class TestHFileReaderV1 { private Configuration conf; private FileSystem fs; - private Map startingMetrics; private static final int N = 1000; @Before public void setUp() throws IOException { - startingMetrics = SchemaMetrics.getMetricsSnapshot(); conf = TEST_UTIL.getConfiguration(); fs = FileSystem.get(conf); - SchemaMetrics.configureGlobally(conf); - } - - @After - public void tearDown() throws Exception { - SchemaMetrics.validateMetricChanges(startingMetrics); } @Test diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index ebb78bf4d2f..290ba4a994d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -26,11 +26,10 @@ import java.util.Collection; import java.util.Map; import java.util.Random; +import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.hfile.LruBlockCache.EvictionThread; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; -import org.apache.hadoop.hbase.regionserver.metrics.TestSchemaMetrics; import org.apache.hadoop.hbase.util.ClassSize; import org.junit.After; import org.junit.Before; @@ -47,30 +46,9 @@ import org.junit.runners.Parameterized.Parameters; * evictions run when they're supposed to and do what they should, * and that cached blocks are accessible when expected to be. */ -@RunWith(Parameterized.class) @Category(SmallTests.class) public class TestLruBlockCache { - private Map startingMetrics; - - public TestLruBlockCache(boolean useTableName) { - SchemaMetrics.setUseTableNameInTest(useTableName); - } - - @Parameters - public static Collection parameters() { - return TestSchemaMetrics.parameters(); - } - - @Before - public void setUp() throws Exception { - startingMetrics = SchemaMetrics.getMetricsSnapshot(); - } - - @After - public void tearDown() throws Exception { - SchemaMetrics.validateMetricChanges(startingMetrics); - } @Test public void testBackgroundEvictionThread() throws Exception { @@ -672,16 +650,6 @@ public class TestLruBlockCache { + ClassSize.align(size); } - @Override - public BlockType getBlockType() { - return BlockType.DATA; - } - - @Override - public SchemaMetrics getSchemaMetrics() { - return SchemaMetrics.getUnknownInstanceForTest(); - } - @Override public int getSerializedLength() { return 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java index 7fb7a7c057f..0179724193b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java @@ -39,8 +39,6 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.InternalScanner; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.BlockMetricType; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; import org.junit.Test; @@ -146,9 +144,6 @@ public class TestScannerSelectionUsingTTL { Set accessedFiles = cache.getCachedFileNamesForTest(); LOG.debug("Files accessed during scan: " + accessedFiles); - Map metricsBeforeCompaction = - SchemaMetrics.getMetricsSnapshot(); - // Exercise both compaction codepaths. 
if (explicitCompaction) { region.getStore(FAMILY_BYTES).compactRecentForTesting(totalNumFiles); @@ -156,18 +151,6 @@ public class TestScannerSelectionUsingTTL { region.compactStores(); } - SchemaMetrics.validateMetricChanges(metricsBeforeCompaction); - Map compactionMetrics = - SchemaMetrics.diffMetrics(metricsBeforeCompaction, - SchemaMetrics.getMetricsSnapshot()); - long compactionDataBlocksRead = SchemaMetrics.getLong( - compactionMetrics, - SchemaMetrics.getInstance(TABLE, FAMILY).getBlockMetricName( - BlockCategory.DATA, true, BlockMetricType.READ_COUNT)); - assertEquals("Invalid number of blocks accessed during compaction. " + - "We only expect non-expired files to be accessed.", - numFreshFiles, compactionDataBlocksRead); region.close(); } - } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java index 73294977062..7c93b8cba2a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java @@ -19,25 +19,16 @@ package org.apache.hadoop.hbase.master; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompatibilityFactory; -import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.MiniHBaseCluster; -import org.apache.hadoop.hbase.master.metrics.MasterMetricsSource; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.test.MetricsAssertHelper; -import org.apache.hadoop.hbase.util.Threads; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.junit.After; import org.junit.AfterClass; -import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -95,18 +86,18 @@ public class TestMasterMetrics { @Test public void testDefaultMasterMetrics() throws Exception { - MasterMetricsSource source = master.getMetrics().getMetricsSource(); - metricsHelper.assertGauge( "numRegionServers", 1, source); - metricsHelper.assertGauge( "averageLoad", 2, source); - metricsHelper.assertGauge( "numDeadRegionServers", 0, source); + MetricsMasterSource masterSource = master.getMetrics().getMetricsSource(); + metricsHelper.assertGauge( "numRegionServers", 1, masterSource); + metricsHelper.assertGauge( "averageLoad", 2, masterSource); + metricsHelper.assertGauge( "numDeadRegionServers", 0, masterSource); - metricsHelper.assertGauge("masterStartTime", master.getMasterStartTime(), source); - metricsHelper.assertGauge("masterActiveTime", master.getMasterActiveTime(), source); + metricsHelper.assertGauge("masterStartTime", master.getMasterStartTime(), masterSource); + metricsHelper.assertGauge("masterActiveTime", master.getMasterActiveTime(), masterSource); - metricsHelper.assertTag("isActiveMaster", "true", source); - metricsHelper.assertTag("serverName", master.getServerName().toString(), source); - metricsHelper.assertTag("clusterId", master.getClusterId(), source); - 
metricsHelper.assertTag("zookeeperQuorum", master.getZooKeeper().getQuorum(), source); + metricsHelper.assertTag("isActiveMaster", "true", masterSource); + metricsHelper.assertTag("serverName", master.getServerName().toString(), masterSource); + metricsHelper.assertTag("clusterId", master.getClusterId(), masterSource); + metricsHelper.assertTag("zookeeperQuorum", master.getZooKeeper().getQuorum(), masterSource); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java similarity index 90% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsWrapper.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java index abbd329a133..52ab4bc4172 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/metrics/TestMasterMetricsWrapper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java @@ -15,14 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hbase.master.metrics; +package org.apache.hadoop.hbase.master; import junit.framework.Assert; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.master.metrics.MasterMetricsWrapperImpl; import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.master.MetricsMasterWrapperImpl; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -47,13 +47,13 @@ public class TestMasterMetricsWrapper { @Test public void testInfo() { HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - MasterMetricsWrapperImpl info = new MasterMetricsWrapperImpl(master); + MetricsMasterWrapperImpl info = new MetricsMasterWrapperImpl(master); Assert.assertEquals(master.getAverageLoad(), info.getAverageLoad()); Assert.assertEquals(master.getClusterId(), info.getClusterId()); Assert.assertEquals(master.getMasterActiveTime(), - info.getMasterActiveTime()); + info.getActiveTime()); Assert.assertEquals(master.getMasterStartTime(), - info.getMasterStartTime()); + info.getStartTime()); Assert.assertEquals(master.getCoprocessors().length, info.getCoprocessors().length); Assert.assertEquals(master.getServerManager().getOnlineServersList().size(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/metrics/TestExactCounterMetric.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/metrics/TestExactCounterMetric.java index edbb4b8bede..625f96c604b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/metrics/TestExactCounterMetric.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/metrics/TestExactCounterMetric.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.SmallTests; import org.junit.Test; import org.junit.experimental.categories.Category; +@Deprecated @Category(SmallTests.class) public class TestExactCounterMetric { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/metrics/TestExponentiallyDecayingSample.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/metrics/TestExponentiallyDecayingSample.java index 010e4ac70b3..d997fe74737 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/metrics/TestExponentiallyDecayingSample.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/metrics/TestExponentiallyDecayingSample.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.SmallTests; import org.junit.Test; import org.junit.experimental.categories.Category; +@Deprecated @Category(SmallTests.class) public class TestExponentiallyDecayingSample { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsHistogram.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsHistogram.java index d992aaf944e..d06c989cec5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsHistogram.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsHistogram.java @@ -28,6 +28,7 @@ import org.junit.Assert; import org.junit.Test; import org.junit.experimental.categories.Category; +@Deprecated @Category(SmallTests.class) public class TestMetricsHistogram { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsMBeanBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsMBeanBase.java index 481a09ea5d0..0812ab6054a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsMBeanBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsMBeanBase.java @@ -45,6 +45,7 @@ import org.junit.experimental.categories.Category; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +@Deprecated @Category(SmallTests.class) public class TestMetricsMBeanBase extends TestCase { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java new file mode 100644 index 00000000000..1c0883413f6 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java @@ -0,0 +1,194 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.regionserver; + +public class MetricsRegionServerWrapperStub implements MetricsRegionServerWrapper { + + @Override + public String getServerName() { + return "test"; + } + + @Override + public String getClusterId() { + return "tClusterId"; + } + + @Override + public String getZookeeperQuorum() { + return "zk"; + } + + @Override + public String getCoprocessors() { + return "co-process"; + } + + @Override + public long getStartCode() { + return 100; + } + + @Override + public long getNumOnlineRegions() { + return 101; + } + + @Override + public long getNumStores() { + return 2; + } + + @Override + public long getNumStoreFiles() { + return 300; + } + + @Override + public long getMemstoreSize() { + return 1025; + } + + @Override + public long getStoreFileSize() { + return 1900; + } + + @Override + public double getRequestsPerSecond() { + return 0; + } + + @Override + public long getTotalRequestCount() { + return 899; + } + + @Override + public long getReadRequestsCount() { + return 997; + } + + @Override + public long getWriteRequestsCount() { + return 707; + } + + @Override + public long getCheckAndMutateChecksFailed() { + return 401; + } + + @Override + public long getCheckAndMutateChecksPassed() { + return 405; + } + + @Override + public long getStoreFileIndexSize() { + return 406; + } + + @Override + public long getTotalStaticIndexSize() { + return 407; + } + + @Override + public long getTotalStaticBloomSize() { + return 408; + } + + @Override + public long getNumPutsWithoutWAL() { + return 409; + } + + @Override + public long getDataInMemoryWithoutWAL() { + return 410; + } + + @Override + public int getPercentFileLocal() { + return 99; + } + + @Override + public int getCompactionQueueSize() { + return 411; + } + + @Override + public int getFlushQueueSize() { + return 412; + } + + @Override + public long getBlockCacheFreeSize() { + return 413; + } + + @Override + public long getBlockCacheCount() { + return 414; + } + + @Override + public long getBlockCacheSize() { + return 415; + } + + @Override + public long getBlockCacheHitCount() { + return 416; + } + + @Override + public long getBlockCacheMissCount() { + return 417; + } + + @Override + public long getBlockCacheEvictedCount() { + return 418; + } + + @Override + public int getBlockCacheHitPercent() { + return 98; + } + + @Override + public int getBlockCacheHitCachingPercent() { + return 97; + } + + + @Override + public long getUpdatesBlockedTime() { + return 419; + } + + @Override + public void forceRecompute() { + //IGNORED. + } + +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MXBeanImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java similarity index 56% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MXBeanImpl.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java index 78f3b6f1608..eebec6a8c49 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MXBeanImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,40 +15,48 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + package org.apache.hadoop.hbase.regionserver; -/** - * Impl for exposing Region Server Information through JMX - */ -public class MXBeanImpl implements MXBean { +public class MetricsRegionWrapperStub implements MetricsRegionWrapper { - private final HRegionServer regionServer; - - private static MXBeanImpl instance = null; - public synchronized static MXBeanImpl init(final HRegionServer rs){ - if (instance == null) { - instance = new MXBeanImpl(rs); - } - return instance; - } - - protected MXBeanImpl(final HRegionServer rs) { - this.regionServer = rs; + @Override + public String getTableName() { + return "MetricsRegionWrapperStub"; } @Override - public String[] getCoprocessors() { - return regionServer.getCoprocessors(); + public String getRegionName() { + return "DEADBEEF001"; } @Override - public String getZookeeperQuorum() { - return regionServer.getZooKeeper().getQuorum(); + public long getNumStores() { + return 101; } @Override - public String getServerName() { - return regionServer.getServerName().getServerName(); + public long getNumStoreFiles() { + return 102; } + @Override + public long getMemstoreSize() { + return 103; + } + + @Override + public long getStoreFileSize() { + return 104; + } + + @Override + public long getReadRequestCount() { + return 105; + } + + @Override + public long getWriteRequestCount() { + return 106; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java index 562935988fd..4169b802c80 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java @@ -28,9 +28,6 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.compress.Compression; -import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.BlockMetricType; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Assert; import org.junit.Test; @@ -52,7 +49,7 @@ public class TestBlocksScanned extends HBaseTestCase { @Override public void setUp() throws Exception { super.setUp(); - SchemaMetrics.setUseTableNameInTest(true); + TEST_UTIL = new HBaseTestingUtility(); TESTTABLEDESC = new HTableDescriptor(TABLE); @@ -72,11 +69,6 @@ public class TestBlocksScanned extends HBaseTestCase { addContent(r, FAMILY, COL); r.flushcache(); - // Get the per-cf metrics - SchemaMetrics schemaMetrics = - SchemaMetrics.getInstance(Bytes.toString(TABLE), Bytes.toString(FAMILY)); - Map schemaMetricSnapshot = SchemaMetrics.getMetricsSnapshot(); - // Do simple test of getting one row only first. 
Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz")); scan.addColumn(FAMILY, COL); @@ -92,26 +84,6 @@ public class TestBlocksScanned extends HBaseTestCase { int kvPerBlock = (int) Math.ceil(BLOCK_SIZE / (double) results.get(0).getLength()); Assert.assertEquals(2, kvPerBlock); - - long expectDataBlockRead = (long) Math.ceil(expectResultSize / (double) kvPerBlock); - long expectIndexBlockRead = expectDataBlockRead; - - verifyDataAndIndexBlockRead(schemaMetricSnapshot, schemaMetrics, - expectDataBlockRead, expectIndexBlockRead); } - private void verifyDataAndIndexBlockRead(Map previousMetricSnapshot, - SchemaMetrics schemaMetrics, long expectDataBlockRead, long expectedIndexBlockRead){ - Map currentMetricsSnapshot = SchemaMetrics.getMetricsSnapshot(); - Map diffs = - SchemaMetrics.diffMetrics(previousMetricSnapshot, currentMetricsSnapshot); - - long dataBlockRead = SchemaMetrics.getLong(diffs, - schemaMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT)); - long indexBlockRead = SchemaMetrics.getLong(diffs, - schemaMetrics.getBlockMetricName(BlockCategory.INDEX, false, BlockMetricType.READ_COUNT)); - - Assert.assertEquals(expectDataBlockRead, dataBlockRead); - Assert.assertEquals(expectedIndexBlockRead, indexBlockRead); - } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java index 1fb16cdd564..17cd2d8ed68 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java @@ -194,7 +194,6 @@ public class TestCacheOnWriteInSchema { BlockCache cache = cacheConf.getBlockCache(); StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL, null); - store.passSchemaMetricsTo(sf); HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader(); try { // Open a scanner with (on read) caching disabled diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 022c777df83..d5a3149cde0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -78,7 +78,6 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogFactory; import org.apache.hadoop.hbase.regionserver.wal.HLogUtil; @@ -132,14 +131,11 @@ public class TestHRegion extends HBaseTestCase { protected final byte [] row2 = Bytes.toBytes("rowB"); - private Map startingMetrics; - /** * @see org.apache.hadoop.hbase.HBaseTestCase#setUp() */ @Override protected void setUp() throws Exception { - startingMetrics = SchemaMetrics.getMetricsSnapshot(); super.setUp(); } @@ -147,7 +143,6 @@ public class TestHRegion extends HBaseTestCase { protected void tearDown() throws Exception { super.tearDown(); EnvironmentEdgeManagerTestHelper.reset(); - SchemaMetrics.validateMetricChanges(startingMetrics); } 
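The removals above and below all retire the same test idiom: snapshot the global SchemaMetrics state, run the operation, then diff and validate. The replacement idiom, visible in the new TestMetricsRegion and TestMetricsRegionServer further down, asserts named values directly against the compat metrics source. A rough sketch of that idiom, reusing only types this patch adds (MetricsRegionServerWrapperStub, MetricsAssertHelper); the class and method names are illustrative:

import org.apache.hadoop.hbase.CompatibilityFactory;
import org.apache.hadoop.hbase.regionserver.MetricsRegionServer;
import org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapperStub;
import org.apache.hadoop.hbase.test.MetricsAssertHelper;

public class MetricsAssertionSketch {

  // Hadoop1/hadoop2-specific assertion helper, resolved from the compat module.
  private final MetricsAssertHelper helper =
      CompatibilityFactory.getInstance(MetricsAssertHelper.class);

  public void checkRegionServerMetrics() {
    // Publish metrics over a stub wrapper, then assert on values by name
    // instead of diffing global SchemaMetrics snapshots.
    MetricsRegionServer rsm = new MetricsRegionServer(new MetricsRegionServerWrapperStub());
    helper.assertGauge("regionCount", 101, rsm.getMetricsSource());        // stub reports 101 regions
    helper.assertCounter("totalRequestCount", 899, rsm.getMetricsSource()); // stub reports 899 requests
  }
}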
////////////////////////////////////////////////////////////////////////////// @@ -3331,9 +3326,6 @@ public class TestHRegion extends HBaseTestCase { info = new HRegionInfo(htd.getName(), HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, false); Path path = new Path(DIR + "testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization"); - // no where we are instantiating HStore in this test case so useTableNameGlobally is null. To - // avoid NullPointerException we are setting useTableNameGlobally to false. - SchemaMetrics.setUseTableNameInTest(false); region = HRegion.newHRegion(path, null, fs, conf, info, htd, null); // region initialization throws IOException and set task state to ABORTED. region.initialize(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMXBean.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMXBean.java deleted file mode 100644 index 9d5a531f0d6..00000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMXBean.java +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.regionserver; - -import junit.framework.Assert; - -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.MediumTests; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(MediumTests.class) -public class TestMXBean { - - private static final HBaseTestingUtility TEST_UTIL = - new HBaseTestingUtility(); - - @BeforeClass - public static void setup() throws Exception { - TEST_UTIL.startMiniCluster(1, 1); - } - - @AfterClass - public static void teardown() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - - @Test - public void testInfo() { - HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0); - HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - MXBeanImpl info = MXBeanImpl.init(rs); - - Assert.assertEquals(rs.getServerName().getServerName(), - info.getServerName()); - Assert.assertEquals(rs.getCoprocessors().length, - info.getCoprocessors().length); - rs.getConfiguration().setInt("hbase.master.info.port", - master.getServerName().getPort()); - Assert.assertEquals(rs.getZooKeeperWatcher().getQuorum(), - info.getZookeeperQuorum()); - } - -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java index f1ab78870ef..96f90cf6aa7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java @@ -35,7 +35,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.HStore.ScanInfo; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; import org.apache.hadoop.hbase.util.Bytes; import com.google.common.base.Joiner; @@ -61,7 +60,6 @@ public class TestMemStore extends TestCase { super.setUp(); this.mvcc = new MultiVersionConsistencyControl(); this.memstore = new MemStore(); - SchemaMetrics.setUseTableNameInTest(false); } public void testPutSameKey() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java new file mode 100644 index 00000000000..4869a8434d0 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java @@ -0,0 +1,25 @@ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.CompatibilityFactory; +import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.test.MetricsAssertHelper; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(SmallTests.class) +public class TestMetricsRegion { + + + public MetricsAssertHelper HELPER = CompatibilityFactory.getInstance(MetricsAssertHelper.class); + + @Test + public void testRegionWrapperMetrics() { + MetricsRegion mr = new MetricsRegion(new MetricsRegionWrapperStub()); + MetricsRegionAggregateSource agg = mr.getSource().getAggregateSource(); + + HELPER.assertGauge("table.MetricsRegionWrapperStub.region.DEADBEEF001.storeCount", 101, agg); + HELPER.assertGauge("table.MetricsRegionWrapperStub.region.DEADBEEF001.storeFileCount", 102, agg); + 
HELPER.assertGauge("table.MetricsRegionWrapperStub.region.DEADBEEF001.memstoreSize", 103, agg); + mr.close(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java new file mode 100644 index 00000000000..9562fa58f5c --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java @@ -0,0 +1,83 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.CompatibilityFactory; +import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.regionserver.MetricsRegionServer; +import org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapperStub; +import org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource; +import org.apache.hadoop.hbase.test.MetricsAssertHelper; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertNotNull; + +/** + * Unit test version of rs metrics tests. 
+ */
+@Category(SmallTests.class)
+public class TestMetricsRegionServer {
+
+  public MetricsAssertHelper HELPER = CompatibilityFactory.getInstance(MetricsAssertHelper.class);
+
+  @Test
+  public void testWrapperSource() {
+
+    MetricsRegionServer rsm = new MetricsRegionServer(new MetricsRegionServerWrapperStub());
+    MetricsRegionServerSource serverSource = rsm.getMetricsSource();
+    HELPER.assertTag("serverName", "test", serverSource);
+    HELPER.assertTag("clusterId", "tClusterId", serverSource);
+    HELPER.assertTag("zookeeperQuorum", "zk", serverSource);
+    HELPER.assertGauge("regionServerStartTime", 100, serverSource);
+    HELPER.assertGauge("regionCount", 101, serverSource);
+    HELPER.assertGauge("storeCount", 2, serverSource);
+    HELPER.assertGauge("storeFileCount", 300, serverSource);
+    HELPER.assertGauge("memstoreSize", 1025, serverSource);
+    HELPER.assertGauge("storeFileSize", 1900, serverSource);
+    HELPER.assertCounter("totalRequestCount", 899, serverSource);
+    HELPER.assertCounter("readRequestCount", 997, serverSource);
+    HELPER.assertCounter("writeRequestCount", 707, serverSource);
+    HELPER.assertCounter("checkMutateFailedCount", 401, serverSource);
+    HELPER.assertCounter("checkMutatePassedCount", 405, serverSource);
+    HELPER.assertGauge("storeFileIndexSize", 406, serverSource);
+    HELPER.assertGauge("staticIndexSize", 407, serverSource);
+    HELPER.assertGauge("staticBloomSize", 408, serverSource);
+    HELPER.assertGauge("putsWithoutWALCount", 409, serverSource);
+    HELPER.assertGauge("putsWithoutWALSize", 410, serverSource);
+    HELPER.assertGauge("percentFilesLocal", 99, serverSource);
+    HELPER.assertGauge("compactionQueueLength", 411, serverSource);
+    HELPER.assertGauge("flushQueueLength", 412, serverSource);
+    HELPER.assertGauge("blockCacheFreeSize", 413, serverSource);
+    HELPER.assertGauge("blockCacheCount", 414, serverSource);
+    HELPER.assertGauge("blockCacheSize", 415, serverSource);
+    HELPER.assertCounter("blockCacheHitCount", 416, serverSource);
+    HELPER.assertCounter("blockCacheMissCount", 417, serverSource);
+    HELPER.assertCounter("blockCacheEvictionCount", 418, serverSource);
+    HELPER.assertGauge("blockCountHitPercent", 98, serverSource);
+    HELPER.assertGauge("blockCacheExpressHitPercent", 97, serverSource);
+    HELPER.assertCounter("updatesBlockedTime", 419, serverSource);
+  }
+
+  @Test
+  public void testConstructor() {
+    MetricsRegionServer rsm = new MetricsRegionServer(new MetricsRegionServerWrapperStub());
+    assertNotNull("There should be a hadoop1/hadoop2 metrics source", rsm.getMetricsSource());
+    assertNotNull("The RegionServerMetricsWrapper should be accessible", rsm.getRegionServerWrapper());
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java
index b81c42f1fc6..12b86f26073 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java
@@ -47,8 +47,6 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Before;
 import org.junit.Test;
@@ -121,12 +119,6 @@ public
class TestMultiColumnScanner { assertTrue(TIMESTAMPS[i] < TIMESTAMPS[i + 1]); } - @Before - public void setUp() { - SchemaMetrics.configureGlobally(TEST_UTIL.getConfiguration()); - } - - @Parameters public static final Collection parameters() { List parameters = new ArrayList(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java index a0ee77f938a..49a7da5c2ce 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.protobuf.ResponseConverter; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse; -import org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics; import org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; @@ -59,8 +58,7 @@ public class TestRSStatusServlet { new ServerName("localhost", FAKE_IPC_PORT, 11111); private final GetServerInfoResponse fakeResponse = ResponseConverter.buildGetServerInfoResponse(fakeServerName, FAKE_WEB_PORT); - private final RegionServerMetrics metrics = - new RegionServerMetrics(); + private final ServerName fakeMasterAddress = new ServerName("localhost", 60010, 1212121212); @@ -71,8 +69,6 @@ public class TestRSStatusServlet { .when(rs).getConfiguration(); Mockito.doReturn(fakeResponse).when(rs).getServerInfo( (RpcController)Mockito.any(), (GetServerInfoRequest)Mockito.any()); - Mockito.doReturn(metrics).when(rs).getMetrics(); - // Fake ZKW ZooKeeperWatcher zkw = Mockito.mock(ZooKeeperWatcher.class); Mockito.doReturn("fakequorum").when(zkw).getQuorum(); @@ -82,6 +78,10 @@ public class TestRSStatusServlet { MasterAddressTracker mat = Mockito.mock(MasterAddressTracker.class); Mockito.doReturn(fakeMasterAddress).when(mat).getMasterAddress(); Mockito.doReturn(mat).when(rs).getMasterAddressManager(); + + MetricsRegionServer rms = Mockito.mock(MetricsRegionServer.class); + Mockito.doReturn(new MetricsRegionServerWrapperStub()).when(rms).getRegionServerWrapper(); + Mockito.doReturn(rms).when(rs).getMetrics(); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java index 822f5b276f2..e5d33bdcade 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java @@ -1,393 +1,301 @@ -/* - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertEquals; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Map; -import java.util.Set; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.MediumTests; -import org.apache.hadoop.hbase.client.Append; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.regionserver.metrics.RegionMetricsStorage; -import org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics. - StoreMetricType; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.client.*; +import org.apache.hadoop.hbase.test.MetricsAssertHelper; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Pair; -import org.junit.After; -import org.junit.Before; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; +import java.io.IOException; -/** - * Test metrics incremented on region server operations. 
- */
 @Category(MediumTests.class)
 public class TestRegionServerMetrics {
+  private static final Log LOG = LogFactory.getLog(TestRegionServerMetrics.class);
+  private static MetricsAssertHelper metricsHelper;
-  private static final Log LOG =
-      LogFactory.getLog(TestRegionServerMetrics.class.getName());
-
-  private final static String TABLE_NAME =
-      TestRegionServerMetrics.class.getSimpleName() + "Table";
-  private String[] FAMILIES = new String[] { "cf1", "cf2", "anotherCF" };
-  private static final int MAX_VERSIONS = 1;
-  private static final int NUM_COLS_PER_ROW = 15;
-  private static final int NUM_FLUSHES = 3;
-  private static final int NUM_REGIONS = 4;
-
-  private static final SchemaMetrics ALL_METRICS =
-      SchemaMetrics.ALL_SCHEMA_METRICS;
-
-  private final HBaseTestingUtility TEST_UTIL =
-      new HBaseTestingUtility();
-
-  private Map startingMetrics;
-
-  private final int META_AND_ROOT = 2;
-
-  @Before
-  public void setUp() throws Exception {
-    SchemaMetrics.setUseTableNameInTest(true);
-    startingMetrics = SchemaMetrics.getMetricsSnapshot();
-    TEST_UTIL.startMiniCluster();
+  static {
+    Logger.getLogger("org.apache.hadoop.hbase").setLevel(Level.DEBUG);
   }
-  @After
-  public void tearDown() throws Exception {
-    TEST_UTIL.shutdownMiniCluster();
-    SchemaMetrics.validateMetricChanges(startingMetrics);
-  }
+  private static MiniHBaseCluster cluster;
+  private static HRegionServer rs;
+  private static Configuration conf;
+  private static HBaseTestingUtility TEST_UTIL;
+  private static MetricsRegionServer metricsRegionServer;
+  private static MetricsRegionServerSource serverSource;
-  private void assertTimeVaryingMetricCount(int expectedCount, String table, String cf,
-      String regionName, String metricPrefix) {
+  @BeforeClass
+  public static void startCluster() throws Exception {
+    metricsHelper = CompatibilityFactory.getInstance(MetricsAssertHelper.class);
+    TEST_UTIL = new HBaseTestingUtility();
+    conf = TEST_UTIL.getConfiguration();
+    conf.setLong("hbase.splitlog.max.resubmit", 0);
+    // Make the failure test faster
+    conf.setInt("zookeeper.recovery.retry", 0);
+    conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1);
-    Integer expectedCountInteger = new Integer(expectedCount);
+    TEST_UTIL.startMiniCluster(1, 1);
+    cluster = TEST_UTIL.getHBaseCluster();
-    if (cf != null) {
-      String cfKey =
-          SchemaMetrics.TABLE_PREFIX + table + "." +
-          SchemaMetrics.CF_PREFIX + cf + "." + metricPrefix;
-      Pair cfPair = RegionMetricsStorage.getTimeVaryingMetric(cfKey);
-      assertEquals(expectedCountInteger, cfPair.getSecond());
+    cluster.waitForActiveAndReadyMaster();
+
+    while (cluster.getLiveRegionServerThreads().size() < 1) {
+      Threads.sleep(100);
     }
-    if (regionName != null) {
-      String rKey =
-          SchemaMetrics.TABLE_PREFIX + table + "." +
-          SchemaMetrics.REGION_PREFIX + regionName + "."
+ metricPrefix; + rs = cluster.getRegionServer(0); + metricsRegionServer = rs.getMetrics(); + serverSource = metricsRegionServer.getMetricsSource(); + } - Pair regionPair = RegionMetricsStorage.getTimeVaryingMetric(rKey); - assertEquals(expectedCountInteger, regionPair.getSecond()); + @AfterClass + public static void after() throws Exception { + if (TEST_UTIL != null) { + TEST_UTIL.shutdownMiniCluster(); } } - - private void assertStoreMetricEquals(long expected, - SchemaMetrics schemaMetrics, StoreMetricType storeMetricType) { - final String storeMetricName = - schemaMetrics.getStoreMetricName(storeMetricType); - Long startValue = startingMetrics.get(storeMetricName); - assertEquals("Invalid value for store metric " + storeMetricName - + " (type " + storeMetricType + ")", expected, - RegionMetricsStorage.getNumericMetric(storeMetricName) - - (startValue != null ? startValue : 0)); + + @Test(timeout = 300000) + public void testRegionCount() throws Exception { + String regionMetricsKey = "regionCount"; + long regions = metricsHelper.getGaugeLong(regionMetricsKey, serverSource); + // Creating a table should add one region + TEST_UTIL.createTable(Bytes.toBytes("table"), Bytes.toBytes("cf")); + metricsHelper.assertGaugeGt(regionMetricsKey, regions, serverSource); } @Test - public void testOperationMetrics() throws IOException { - String cf = "OPCF"; - String otherCf = "otherCF"; - String rk = "testRK"; - String icvCol = "icvCol"; - String appendCol = "appendCol"; - String regionName = null; - HTable hTable = - TEST_UTIL.createTable(TABLE_NAME.getBytes(), - new byte[][] { cf.getBytes(), otherCf.getBytes() }); - Set regionInfos = hTable.getRegionLocations().keySet(); - - regionName = regionInfos.toArray(new HRegionInfo[regionInfos.size()])[0].getEncodedName(); - - //Do a multi put that has one cf. Since they are in different rk's - //The lock will still be obtained and everything will be applied in one multiput. - Put pOne = new Put(rk.getBytes()); - pOne.add(cf.getBytes(), icvCol.getBytes(), Bytes.toBytes(0L)); - Put pTwo = new Put("ignored1RK".getBytes()); - pTwo.add(cf.getBytes(), "ignored".getBytes(), Bytes.toBytes(0L)); - - hTable.put(Arrays.asList(new Put[] {pOne, pTwo})); - - // Do a multiput where the cf doesn't stay consistent. - Put pThree = new Put("ignored2RK".getBytes()); - pThree.add(cf.getBytes(), "ignored".getBytes(), Bytes.toBytes("TEST1")); - Put pFour = new Put("ignored3RK".getBytes()); - pFour.add(otherCf.getBytes(), "ignored".getBytes(), Bytes.toBytes(0L)); - - hTable.put(Arrays.asList(new Put[] { pThree, pFour })); - - hTable.incrementColumnValue(rk.getBytes(), cf.getBytes(), icvCol.getBytes(), 1L); - - Get g = new Get(rk.getBytes()); - g.addColumn(cf.getBytes(), appendCol.getBytes()); - hTable.get(g); - - Append a = new Append(rk.getBytes()); - a.add(cf.getBytes(), appendCol.getBytes(), Bytes.toBytes("-APPEND")); - hTable.append(a); - - Delete dOne = new Delete(rk.getBytes()); - dOne.deleteFamily(cf.getBytes()); - hTable.delete(dOne); - - Delete dTwo = new Delete(rk.getBytes()); - hTable.delete(dTwo); - - // There should be one multi put where the cf is consistent - assertTimeVaryingMetricCount(1, TABLE_NAME, cf, null, "multiput_"); - - // There were two multiputs to the cf. - assertTimeVaryingMetricCount(2, TABLE_NAME, null, regionName, "multiput_"); - - // There was one multiput where the cf was not consistent. 
- assertTimeVaryingMetricCount(1, TABLE_NAME, "__unknown", null, "multiput_"); - - // One increment and one append - assertTimeVaryingMetricCount(1, TABLE_NAME, cf, regionName, "increment_"); - assertTimeVaryingMetricCount(1, TABLE_NAME, cf, regionName, "append_"); - - // One delete where the cf is known - assertTimeVaryingMetricCount(1, TABLE_NAME, cf, null, "multidelete_"); - - // two deletes in the region. - assertTimeVaryingMetricCount(2, TABLE_NAME, null, regionName, "multidelete_"); - - // Three gets. one for gets. One for append. One for increment. - assertTimeVaryingMetricCount(3, TABLE_NAME, cf, regionName, "get_"); - - } - - private void assertCheckAndMutateMetrics(final HRegionServer rs, - long expectedPassed, long expectedFailed) { - rs.doMetrics(); - RegionServerMetrics metrics = rs.getMetrics(); - assertEquals("checkAndMutatePassed metrics incorrect", - expectedPassed, metrics.checkAndMutateChecksPassed.get()); - assertEquals("checkAndMutateFailed metrics incorrect", - expectedFailed, metrics.checkAndMutateChecksFailed.get()); + public void testLocalFiles() throws Exception { + metricsHelper.assertGauge("percentFilesLocal", 0, serverSource); } @Test - public void testCheckAndMutateMetrics() throws Exception { - final HRegionServer rs = - TEST_UTIL.getMiniHBaseCluster().getRegionServer(0); - byte [] tableName = Bytes.toBytes("testCheckAndMutateMetrics"); - byte [] family = Bytes.toBytes("family"); - byte [] qualifier = Bytes.toBytes("qualifier"); - byte [] row = Bytes.toBytes("row1"); - HTable table = TEST_UTIL.createTable(tableName, family); - long expectedPassed = 0; - long expectedFailed = 0; + public void testRequestCount() throws Exception { + String tableNameString = "testRequestCount"; + byte[] tName = Bytes.toBytes(tableNameString); + byte[] cfName = Bytes.toBytes("d"); + byte[] row = Bytes.toBytes("rk"); + byte[] qualifier = Bytes.toBytes("qual"); + byte[] initValue = Bytes.toBytes("Value"); + byte[] nextValue = Bytes.toBytes("NEXT VAL"); - // checkAndPut success - Put put = new Put(row); - byte [] val1 = Bytes.toBytes("val1"); - put.add(family, qualifier, val1); - table.checkAndPut(row, family, qualifier, null, put); - expectedPassed++; - assertCheckAndMutateMetrics(rs, expectedPassed, expectedFailed); - // checkAndPut failure - byte [] val2 = Bytes.toBytes("val2"); - table.checkAndPut(row, family, qualifier, val2, put); - expectedFailed++; - assertCheckAndMutateMetrics(rs, expectedPassed, expectedFailed); + TEST_UTIL.createTable(tName, cfName); - // checkAndDelete success - Delete delete = new Delete(row); - delete.deleteColumn(family, qualifier); - table.checkAndDelete(row, family, qualifier, val1, delete); - expectedPassed++; - assertCheckAndMutateMetrics(rs, expectedPassed, expectedFailed); + new HTable(conf, tName).close(); //wait for the table to come up. 
+    metricsRegionServer.getRegionServerWrapper().forceRecompute();
+    long requests = metricsHelper.getCounter("totalRequestCount", serverSource);
+    long readRequests = metricsHelper.getCounter("readRequestCount", serverSource);
+    long writeRequests = metricsHelper.getCounter("writeRequestCount", serverSource);
-    // checkAndDelete failure
-    table.checkAndDelete(row, family, qualifier, val1, delete);
-    expectedFailed++;
-    assertCheckAndMutateMetrics(rs, expectedPassed, expectedFailed);
+    HTable table = new HTable(conf, tName);
+
+    Put p = new Put(row);
+    p.add(cfName, qualifier, initValue);
+
+    for (int i = 0; i < 30; i++) {
+      table.put(p);
+    }
+    table.flushCommits();
+
+    Get g = new Get(row);
+    for (int i = 0; i < 10; i++) {
+      table.get(g);
+    }
+
+    for (HRegionInfo i : table.getRegionLocations().keySet()) {
+      MetricsRegionAggregateSource agg = rs.getRegion(i.getRegionName())
+          .getMetrics()
+          .getSource()
+          .getAggregateSource();
+      String prefix = "table." + tableNameString + ".region." + i.getEncodedName();
+      metricsHelper.assertCounter(prefix + ".getCount", 10, agg);
+      metricsHelper.assertCounter(prefix + ".multiPutCount", 30, agg);
+    }
+
+    metricsRegionServer.getRegionServerWrapper().forceRecompute();
+    metricsHelper.assertCounterGt("totalRequestCount", requests + 39, serverSource);
+    metricsHelper.assertCounterGt("readRequestCount", readRequests + 9, serverSource);
+    metricsHelper.assertCounterGt("writeRequestCount", writeRequests + 29, serverSource);
   }
   @Test
-  public void testRemoveRegionMetrics() throws IOException, InterruptedException {
-    String cf = "REMOVECF";
-    HTable hTable = TEST_UTIL.createTable(TABLE_NAME.getBytes(), cf.getBytes());
-    HRegionInfo[] regionInfos =
-        hTable.getRegionLocations().keySet()
-            .toArray(new HRegionInfo[hTable.getRegionLocations().keySet().size()]);
+  public void testPutsWithoutWal() throws Exception {
+    byte[] tableName = Bytes.toBytes("testPutsWithoutWal");
+    byte[] cf = Bytes.toBytes("d");
+    byte[] row = Bytes.toBytes("rk");
+    byte[] qualifier = Bytes.toBytes("qual");
+    byte[] val = Bytes.toBytes("Value");
-    String regionName = regionInfos[0].getEncodedName();
+    metricsRegionServer.getRegionServerWrapper().forceRecompute();
-    // Do some operations so there are metrics.
- Put pOne = new Put("TEST".getBytes()); - pOne.add(cf.getBytes(), "test".getBytes(), "test".getBytes()); - hTable.put(pOne); + TEST_UTIL.createTable(tableName, cf); - Get g = new Get("TEST".getBytes()); - g.addFamily(cf.getBytes()); - hTable.get(g); - assertTimeVaryingMetricCount(1, TABLE_NAME, cf, regionName, "get_"); - HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); - admin.disableTable(TABLE_NAME.getBytes()); - admin.deleteTable(TABLE_NAME.getBytes()); + HTable t = new HTable(conf, tableName); - assertTimeVaryingMetricCount(0, TABLE_NAME, cf, regionName, "get_"); - } - - @Test - public void testMultipleRegions() throws IOException, InterruptedException { + Put p = new Put(row); + p.add(cf, qualifier, val); + p.setWriteToWAL(false); - TEST_UTIL.createRandomTable( - TABLE_NAME, - Arrays.asList(FAMILIES), - MAX_VERSIONS, NUM_COLS_PER_ROW, NUM_FLUSHES, NUM_REGIONS, 1000); + t.put(p); + t.flushCommits(); - final HRegionServer rs = - TEST_UTIL.getMiniHBaseCluster().getRegionServer(0); - - assertEquals(NUM_REGIONS + META_AND_ROOT, ProtobufUtil.getOnlineRegions(rs).size()); - - rs.doMetrics(); - for (HRegion r : TEST_UTIL.getMiniHBaseCluster().getRegions( - Bytes.toBytes(TABLE_NAME))) { - for (Map.Entry storeEntry : r.getStores().entrySet()) { - LOG.info("For region " + r.getRegionNameAsString() + ", CF " + - Bytes.toStringBinary(storeEntry.getKey()) + " found store files " + - ": " + storeEntry.getValue().getStorefiles()); - } - } - - assertStoreMetricEquals(NUM_FLUSHES * NUM_REGIONS * FAMILIES.length - + META_AND_ROOT, ALL_METRICS, StoreMetricType.STORE_FILE_COUNT); - - for (String cf : FAMILIES) { - SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(TABLE_NAME, cf); - assertStoreMetricEquals(NUM_FLUSHES * NUM_REGIONS, schemaMetrics, - StoreMetricType.STORE_FILE_COUNT); - } - - // ensure that the max value is also maintained - final String storeMetricName = ALL_METRICS - .getStoreMetricNameMax(StoreMetricType.STORE_FILE_COUNT); - assertEquals("Invalid value for store metric " + storeMetricName, - NUM_FLUSHES, RegionMetricsStorage.getNumericMetric(storeMetricName)); - } - - - - private void assertSizeMetric(String table, String[] cfs, int[] metrics) { - // we have getsize & nextsize for each column family - assertEquals(cfs.length * 2, metrics.length); - - for (int i =0; i < cfs.length; ++i) { - String prefix = SchemaMetrics.generateSchemaMetricsPrefix(table, cfs[i]); - String getMetric = prefix + SchemaMetrics.METRIC_GETSIZE; - String nextMetric = prefix + SchemaMetrics.METRIC_NEXTSIZE; - - // verify getsize and nextsize matches - int getSize = RegionMetricsStorage.getNumericMetrics().containsKey(getMetric) ? - RegionMetricsStorage.getNumericMetrics().get(getMetric).intValue() : 0; - int nextSize = RegionMetricsStorage.getNumericMetrics().containsKey(nextMetric) ? 
- RegionMetricsStorage.getNumericMetrics().get(nextMetric).intValue() : 0; - - assertEquals(metrics[i], getSize); - assertEquals(metrics[cfs.length + i], nextSize); - } + metricsRegionServer.getRegionServerWrapper().forceRecompute(); + metricsHelper.assertGauge("putsWithoutWALCount", 1, serverSource); + long minLength = row.length + cf.length + qualifier.length + val.length; + metricsHelper.assertGaugeGt("putsWithoutWALSize", minLength, serverSource); } @Test - public void testGetNextSize() throws IOException, InterruptedException { - String rowName = "row1"; - byte[] ROW = Bytes.toBytes(rowName); - String tableName = "SizeMetricTest"; - byte[] TABLE = Bytes.toBytes(tableName); - String cf1Name = "cf1"; - String cf2Name = "cf2"; - String[] cfs = new String[] {cf1Name, cf2Name}; - byte[] CF1 = Bytes.toBytes(cf1Name); - byte[] CF2 = Bytes.toBytes(cf2Name); + public void testStoreCount() throws Exception { + byte[] tableName = Bytes.toBytes("testStoreCount"); + byte[] cf = Bytes.toBytes("d"); + byte[] row = Bytes.toBytes("rk"); + byte[] qualifier = Bytes.toBytes("qual"); + byte[] val = Bytes.toBytes("Value"); - long ts = 1234; - HTable hTable = TEST_UTIL.createTable(TABLE, new byte[][]{CF1, CF2}); - HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); + metricsRegionServer.getRegionServerWrapper().forceRecompute(); + long stores = metricsHelper.getGaugeLong("storeCount", serverSource); + long storeFiles = metricsHelper.getGaugeLong("storeFileCount", serverSource); - Put p = new Put(ROW); - p.add(CF1, CF1, ts, CF1); - p.add(CF2, CF2, ts, CF2); - hTable.put(p); + TEST_UTIL.createTable(tableName, cf); - KeyValue kv1 = new KeyValue(ROW, CF1, CF1, ts, CF1); - KeyValue kv2 = new KeyValue(ROW, CF2, CF2, ts, CF2); - int kvLength = kv1.getLength(); - assertEquals(kvLength, kv2.getLength()); + //Force a hfile. 
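+    // One put plus an explicit admin flush should leave exactly one new store
+    // (for the single column family) and one new store file on this region server.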
+    HTable t = new HTable(conf, tableName);
+    Put p = new Put(row);
+    p.add(cf, qualifier, val);
+    t.put(p);
+    t.flushCommits();
+    TEST_UTIL.getHBaseAdmin().flush(tableName);
-    // only cf1.getsize is set on Get
-    hTable.get(new Get(ROW).addFamily(CF1));
-    assertSizeMetric(tableName, cfs, new int[] {kvLength, 0, 0, 0});
+    metricsRegionServer.getRegionServerWrapper().forceRecompute();
+    metricsHelper.assertGauge("storeCount", stores + 1, serverSource);
+    metricsHelper.assertGauge("storeFileCount", storeFiles + 1, serverSource);
+  }
-    // only cf2.getsize is set on Get
-    hTable.get(new Get(ROW).addFamily(CF2));
-    assertSizeMetric(tableName, cfs, new int[] {kvLength, kvLength, 0, 0});
+
+  @Test
+  public void testCheckAndPutCount() throws Exception {
+    String tableNameString = "testCheckAndPutCount";
+    byte[] tableName = Bytes.toBytes(tableNameString);
+    byte[] cf = Bytes.toBytes("d");
+    byte[] row = Bytes.toBytes("rk");
+    byte[] qualifier = Bytes.toBytes("qual");
+    byte[] valOne = Bytes.toBytes("Value");
+    byte[] valTwo = Bytes.toBytes("ValueTwo");
+    byte[] valThree = Bytes.toBytes("ValueThree");
-    // only cf2.nextsize is set
-    for (Result res : hTable.getScanner(CF2)) {
+    TEST_UTIL.createTable(tableName, cf);
+    HTable t = new HTable(conf, tableName);
+    Put p = new Put(row);
+    p.add(cf, qualifier, valOne);
+    t.put(p);
+    t.flushCommits();
+
+    Put pTwo = new Put(row);
+    pTwo.add(cf, qualifier, valTwo);
+    t.checkAndPut(row, cf, qualifier, valOne, pTwo);
+    t.flushCommits();
+
+    Put pThree = new Put(row);
+    pThree.add(cf, qualifier, valThree);
+    t.checkAndPut(row, cf, qualifier, valOne, pThree);
+    t.flushCommits();
+
+    metricsRegionServer.getRegionServerWrapper().forceRecompute();
+    metricsHelper.assertCounter("checkMutateFailedCount", 1, serverSource);
+    metricsHelper.assertCounter("checkMutatePassedCount", 1, serverSource);
+  }
+
+  @Test
+  public void testIncrement() throws Exception {
+    String tableNameString = "testIncrement";
+    byte[] tableName = Bytes.toBytes(tableNameString);
+    byte[] cf = Bytes.toBytes("d");
+    byte[] row = Bytes.toBytes("rk");
+    byte[] qualifier = Bytes.toBytes("qual");
+    byte[] val = Bytes.toBytes(0L);
+
+    TEST_UTIL.createTable(tableName, cf);
+    HTable t = new HTable(conf, tableName);
+
+    Put p = new Put(row);
+    p.add(cf, qualifier, val);
+    t.put(p);
+    t.flushCommits();
+
+    for (int count = 0; count < 13; count++) {
+      Increment inc = new Increment(row);
+      inc.addColumn(cf, qualifier, 100);
+      t.increment(inc);
     }
-    assertSizeMetric(tableName, cfs,
-        new int[] {kvLength, kvLength, 0, kvLength});
+    t.flushCommits();
-    // only cf2.nextsize is set
-    for (Result res : hTable.getScanner(CF1)) {
-    }
-    assertSizeMetric(tableName, cfs,
-        new int[] {kvLength, kvLength, kvLength, kvLength});
+    metricsRegionServer.getRegionServerWrapper().forceRecompute();
+    metricsHelper.assertCounter("incrementNumOps", 13, serverSource);
+  }
+
+  @Test
+  public void testAppend() throws Exception {
+    String tableNameString = "testAppend";
+    byte[] tableName = Bytes.toBytes(tableNameString);
+    byte[] cf = Bytes.toBytes("d");
+    byte[] row = Bytes.toBytes("rk");
+    byte[] qualifier = Bytes.toBytes("qual");
+    byte[] val = Bytes.toBytes("One");
+
+    TEST_UTIL.createTable(tableName, cf);
+    HTable t = new HTable(conf, tableName);
+
+    Put p = new Put(row);
+    p.add(cf, qualifier, val);
+    t.put(p);
+    t.flushCommits();
+
+    for (int count = 0;
count< 73; count++) { + Append append = new Append(row); + append.add(cf, qualifier, Bytes.toBytes(",Test")); + t.append(append); } - assertSizeMetric(tableName, cfs, - new int[] {kvLength, kvLength, kvLength, kvLength}); + + t.flushCommits(); + + metricsRegionServer.getRegionServerWrapper().forceRecompute(); + metricsHelper.assertCounter("appendNumOps", 73, serverSource); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java index 3f07e3273a0..e7481d363f1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java @@ -50,7 +50,6 @@ import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder; import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; import org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; @@ -72,22 +71,17 @@ public class TestStoreFile extends HBaseTestCase { private CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration()); private static String ROOT_DIR = TEST_UTIL.getDataTestDir("TestStoreFile").toString(); - private Map startingMetrics; - private static final ChecksumType CKTYPE = ChecksumType.CRC32; private static final int CKBYTES = 512; @Override public void setUp() throws Exception { super.setUp(); - this.startingMetrics = SchemaMetrics.getMetricsSnapshot(); } @Override public void tearDown() throws Exception { super.tearDown(); - LOG.info("Verifying metrics for " + getName() + ": " + this.startingMetrics); - SchemaMetrics.validateMetricChanges(this.startingMetrics); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java index 5e1e5d3a7ae..d7910a09321 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.KeyValueTestUtil; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.HStore.ScanInfo; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdge; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; @@ -54,7 +53,6 @@ public class TestStoreScanner extends TestCase { public void setUp() throws Exception { super.setUp(); - SchemaMetrics.setUseTableNameInTest(false); } /* diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaConfigured.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaConfigured.java deleted file mode 100644 index 0ef07757a30..00000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaConfigured.java +++ /dev/null @@ -1,243 +0,0 @@ -/* - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor 
license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ - -package org.apache.hadoop.hbase.regionserver.metrics; - -import static org.junit.Assert.*; - -import java.util.regex.Pattern; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.SmallTests; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.codehaus.jettison.json.JSONException; -import org.codehaus.jettison.json.JSONStringer; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(SmallTests.class) -public class TestSchemaConfigured { - private static final Log LOG = LogFactory.getLog(TestSchemaConfigured.class); - private final String TABLE_NAME = "myTable"; - private final String CF_NAME = "myColumnFamily"; - - private static final Path TMP_HFILE_PATH = new Path( - "/hbase/myTable/myRegion/" + HRegion.REGION_TEMP_SUBDIR + "/hfilename"); - - /** Test if toString generates real JSON */ - @Test - public void testToString() throws JSONException { - SchemaConfigured sc = new SchemaConfigured(null, TABLE_NAME, CF_NAME); - JSONStringer json = new JSONStringer(); - json.object(); - json.key("tableName"); - json.value(TABLE_NAME); - json.key("cfName"); - json.value(CF_NAME); - json.endObject(); - assertEquals(json.toString(), sc.schemaConfAsJSON()); - } - - /** Don't allow requesting metrics before setting table/CF name */ - @Test - public void testDelayedInitialization() { - SchemaConfigured unconfigured = new SchemaConfigured(); - try { - unconfigured.getSchemaMetrics(); - fail(IllegalStateException.class.getSimpleName() + " expected"); - } catch (IllegalStateException ex) { - assertTrue("Unexpected exception message: " + ex.getMessage(), - Pattern.matches(".* metrics requested before .* initialization.*", - ex.getMessage())); - LOG.debug("Expected exception: " + ex.getMessage()); - } - - SchemaMetrics.setUseTableNameInTest(false); - SchemaConfigured other = new SchemaConfigured(null, TABLE_NAME, CF_NAME); - other.passSchemaMetricsTo(unconfigured); - unconfigured.getSchemaMetrics(); // now this should succeed - } - - /** Don't allow setting table/CF name twice */ - @Test - public void testInitializingTwice() { - Configuration conf = HBaseConfiguration.create(); - for (int i = 0; i < 4; ++i) { - SchemaConfigured sc = new SchemaConfigured(conf, TABLE_NAME, CF_NAME); - SchemaConfigured target = - new SchemaConfigured(conf, TABLE_NAME + (i % 2 == 1 ? "1" : ""), - CF_NAME + ((i & 2) != 0 ? "1" : "")); - if (i == 0) { - sc.passSchemaMetricsTo(target); // No exception expected. 
- continue; - } - - String testDesc = - "Trying to re-configure " + target.schemaConfAsJSON() + " with " - + sc.schemaConfAsJSON(); - try { - sc.passSchemaMetricsTo(target); - fail(IllegalArgumentException.class.getSimpleName() + " expected"); - } catch (IllegalArgumentException ex) { - final String errorMsg = testDesc + ". Unexpected exception message: " + - ex.getMessage(); - final String exceptionRegex = "Trying to change table .* CF .*"; - assertTrue(errorMsg, Pattern.matches(exceptionRegex, ex.getMessage())); - LOG.debug("Expected exception: " + ex.getMessage()); - } - } - } - - @Test(expected=IllegalStateException.class) - public void testConfigureWithUnconfigured1() { - SchemaConfigured unconfigured = new SchemaConfigured(null, "t1", null); - SchemaConfigured target = new SchemaConfigured(); - unconfigured.passSchemaMetricsTo(target); - } - - @Test(expected=IllegalStateException.class) - public void testConfigureWithUnconfigured2() { - SchemaConfigured unconfigured = new SchemaConfigured(null, null, "cf1"); - SchemaConfigured target = new SchemaConfigured(); - unconfigured.passSchemaMetricsTo(target); - } - - /** - * Configuring with an uninitialized object is equivalent to re-setting - * schema metrics configuration. - */ - public void testConfigureWithNull() { - SchemaConfigured unconfigured = new SchemaConfigured(); - SchemaConfigured target = new SchemaConfigured(null, "t1", "cf1"); - unconfigured.passSchemaMetricsTo(target); - assertTrue(target.getTableName() == null); - assertTrue(target.getColumnFamilyName() == null); - } - - public void testConfigurePartiallyDefined() { - final SchemaConfigured sc = new SchemaConfigured(null, "t1", "cf1"); - final SchemaConfigured target1 = new SchemaConfigured(null, "t2", null); - sc.passSchemaMetricsTo(target1); - assertEquals("t2", target1.getColumnFamilyName()); - assertEquals("cf1", target1.getColumnFamilyName()); - - final SchemaConfigured target2 = new SchemaConfigured(null, null, "cf2"); - sc.passSchemaMetricsTo(target2); - assertEquals("t1", target2.getColumnFamilyName()); - assertEquals("cf2", target2.getColumnFamilyName()); - - final SchemaConfigured target3 = new SchemaConfigured(null, null, null); - sc.passSchemaMetricsTo(target3); - assertEquals("t1", target2.getColumnFamilyName()); - assertEquals("cf1", target2.getColumnFamilyName()); - } - - @Test(expected=IllegalArgumentException.class) - public void testConflictingConf() { - SchemaConfigured sc = new SchemaConfigured(null, "t1", "cf1"); - SchemaConfigured target = new SchemaConfigured(null, "t2", "cf1"); - sc.passSchemaMetricsTo(target); - } - - /** We allow setting CF to unknown and then reconfiguring it */ - public void testReconfigureUnknownCF() { - SchemaConfigured sc = new SchemaConfigured(null, "t1", "cf1"); - SchemaConfigured target = - new SchemaConfigured(null, "t1", SchemaMetrics.UNKNOWN); - sc.passSchemaMetricsTo(target); - } - - /** - * When the "column family" deduced from the path is ".tmp" (this happens - * for files written on compaction) we allow re-setting the CF to another - * value. - */ - @Test - public void testTmpPath() { - SchemaConfigured sc = new SchemaConfigured(null, "myTable", "myCF"); - SchemaConfigured target = new SchemaConfigured(TMP_HFILE_PATH); - sc.passSchemaMetricsTo(target); - } - - /** - * Even if CF is initially undefined (".tmp"), we don't allow to change - * table name. 
- */ - @Test(expected=IllegalArgumentException.class) - public void testTmpPathButInvalidTable() { - SchemaConfigured sc = new SchemaConfigured(null, "anotherTable", "myCF"); - SchemaConfigured target = new SchemaConfigured(TMP_HFILE_PATH); - sc.passSchemaMetricsTo(target); - } - - @Test - public void testSchemaConfigurationHook() { - SchemaConfigured sc = new SchemaConfigured(null, "myTable", "myCF"); - final StringBuilder newCF = new StringBuilder(); - final StringBuilder newTable = new StringBuilder(); - SchemaConfigured target = new SchemaConfigured() { - @Override - protected void schemaConfigurationChanged() { - newCF.append(getColumnFamilyName()); - newTable.append(getTableName()); - } - }; - sc.passSchemaMetricsTo(target); - assertEquals("myTable", newTable.toString()); - assertEquals("myCF", newCF.toString()); - } - - @Test - public void testResetSchemaMetricsConf() { - SchemaConfigured target = new SchemaConfigured(null, "t1", "cf1"); - SchemaConfigured.resetSchemaMetricsConf(target); - new SchemaConfigured(null, "t2", "cf2").passSchemaMetricsTo(target); - assertEquals("t2", target.getTableName()); - assertEquals("cf2", target.getColumnFamilyName()); - } - - @Test - public void testPathTooShort() { - // This has too few path components (four, the first one is empty). - SchemaConfigured sc1 = new SchemaConfigured(new Path("/a/b/c/d")); - assertEquals(SchemaMetrics.UNKNOWN, sc1.getTableName()); - assertEquals(SchemaMetrics.UNKNOWN, sc1.getColumnFamilyName()); - - SchemaConfigured sc2 = new SchemaConfigured(new Path("a/b/c/d")); - assertEquals(SchemaMetrics.UNKNOWN, sc2.getTableName()); - assertEquals(SchemaMetrics.UNKNOWN, sc2.getColumnFamilyName()); - - SchemaConfigured sc3 = new SchemaConfigured( - new Path("/hbase/tableName/regionId/cfName/hfileName")); - assertEquals("tableName", sc3.getTableName()); - assertEquals("cfName", sc3.getColumnFamilyName()); - - SchemaConfigured sc4 = new SchemaConfigured( - new Path("hbase/tableName/regionId/cfName/hfileName")); - assertEquals("tableName", sc4.getTableName()); - assertEquals("cfName", sc4.getColumnFamilyName()); - } - - -} - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaMetrics.java deleted file mode 100644 index e89ac9ecbe5..00000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaMetrics.java +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ - -package org.apache.hadoop.hbase.regionserver.metrics; - -import static org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics. 
- BOOL_VALUES; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; - -import java.util.Collection; -import java.util.HashSet; -import java.util.Map; -import java.util.Random; -import java.util.Set; - -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.MediumTests; -import org.apache.hadoop.hbase.io.hfile.BlockType; -import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics. - BlockMetricType; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.ClassSize; -import org.junit.Before; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; - -@Category(MediumTests.class) -@RunWith(Parameterized.class) -public class TestSchemaMetrics { - - private final String TABLE_NAME = "myTable"; - private final String CF_NAME = "myColumnFamily"; - - private final boolean useTableName; - private Map startingMetrics; - - @Parameters - public static Collection parameters() { - return HBaseTestingUtility.BOOLEAN_PARAMETERIZED; - } - - public TestSchemaMetrics(boolean useTableName) { - this.useTableName = useTableName; - SchemaMetrics.setUseTableNameInTest(useTableName); - } - - @Before - public void setUp() { - startingMetrics = SchemaMetrics.getMetricsSnapshot(); - }; - - @Test - public void testNaming() { - final String metricPrefix = (useTableName ? "tbl." + - TABLE_NAME + "." : "") + "cf." + CF_NAME + "."; - SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(TABLE_NAME, - CF_NAME); - SchemaMetrics ALL_CF_METRICS = SchemaMetrics.ALL_SCHEMA_METRICS; - - // fsReadTimeMetric - assertEquals(metricPrefix + "fsRead", schemaMetrics.getBlockMetricName( - BlockCategory.ALL_CATEGORIES, false, BlockMetricType.READ_TIME)); - - // compactionReadTimeMetric - assertEquals(metricPrefix + "compactionRead", - schemaMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, true, - BlockMetricType.READ_TIME)); - - // fsBlockReadCntMetric - assertEquals(metricPrefix + "fsBlockReadCnt", - schemaMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, false, - BlockMetricType.READ_COUNT)); - - // fsBlockReadCacheHitCntMetric - assertEquals(metricPrefix + "fsBlockReadCacheHitCnt", - schemaMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, false, - BlockMetricType.CACHE_HIT)); - - // fsBlockReadCacheMissCntMetric - assertEquals(metricPrefix + "fsBlockReadCacheMissCnt", - schemaMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, false, - BlockMetricType.CACHE_MISS)); - - // compactionBlockReadCntMetric - assertEquals(metricPrefix + "compactionBlockReadCnt", - schemaMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, true, - BlockMetricType.READ_COUNT)); - - // compactionBlockReadCacheHitCntMetric - assertEquals(metricPrefix + "compactionBlockReadCacheHitCnt", - schemaMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, true, - BlockMetricType.CACHE_HIT)); - - // compactionBlockReadCacheMissCntMetric - assertEquals(metricPrefix + "compactionBlockReadCacheMissCnt", - schemaMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, true, - BlockMetricType.CACHE_MISS)); - - // fsMetaBlockReadCntMetric - assertEquals("fsMetaBlockReadCnt", ALL_CF_METRICS.getBlockMetricName( - BlockCategory.META, false, BlockMetricType.READ_COUNT)); - - // fsMetaBlockReadCacheHitCntMetric - 
assertEquals("fsMetaBlockReadCacheHitCnt", - ALL_CF_METRICS.getBlockMetricName(BlockCategory.META, false, - BlockMetricType.CACHE_HIT)); - - // fsMetaBlockReadCacheMissCntMetric - assertEquals("fsMetaBlockReadCacheMissCnt", - ALL_CF_METRICS.getBlockMetricName(BlockCategory.META, false, - BlockMetricType.CACHE_MISS)); - - // Per-(column family, block type) statistics. - assertEquals(metricPrefix + "bt.Index.fsBlockReadCnt", - schemaMetrics.getBlockMetricName(BlockCategory.INDEX, false, - BlockMetricType.READ_COUNT)); - - assertEquals(metricPrefix + "bt.Data.compactionBlockReadCacheHitCnt", - schemaMetrics.getBlockMetricName(BlockCategory.DATA, true, - BlockMetricType.CACHE_HIT)); - - // A special case for Meta blocks - assertEquals(metricPrefix + "compactionMetaBlockReadCacheHitCnt", - schemaMetrics.getBlockMetricName(BlockCategory.META, true, - BlockMetricType.CACHE_HIT)); - - // Cache metrics - assertEquals(metricPrefix + "blockCacheSize", - schemaMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, false, - BlockMetricType.CACHE_SIZE)); - - assertEquals(metricPrefix + "bt.Index.blockCacheNumEvicted", - schemaMetrics.getBlockMetricName(BlockCategory.INDEX, false, - BlockMetricType.EVICTED)); - - assertEquals("bt.Data.blockCacheNumCached", - ALL_CF_METRICS.getBlockMetricName(BlockCategory.DATA, false, - BlockMetricType.CACHED)); - - assertEquals("blockCacheNumCached", ALL_CF_METRICS.getBlockMetricName( - BlockCategory.ALL_CATEGORIES, false, BlockMetricType.CACHED)); - - // "Non-compaction aware" metrics - try { - ALL_CF_METRICS.getBlockMetricName(BlockCategory.ALL_CATEGORIES, true, - BlockMetricType.CACHE_SIZE); - fail("Exception expected"); - } catch (IllegalArgumentException ex) { - } - - // Bloom metrics - assertEquals("keyMaybeInBloomCnt", ALL_CF_METRICS.getBloomMetricName(true)); - assertEquals(metricPrefix + "keyNotInBloomCnt", - schemaMetrics.getBloomMetricName(false)); - - schemaMetrics.printMetricNames(); - } - - public void checkMetrics() { - SchemaMetrics.validateMetricChanges(startingMetrics); - } - - @Test - public void testIncrements() { - Random rand = new Random(23982737L); - for (int i = 1; i <= 3; ++i) { - final String tableName = "table" + i; - for (int j = 1; j <= 3; ++j) { - final String cfName = "cf" + j; - SchemaMetrics sm = SchemaMetrics.getInstance(tableName, cfName); - for (boolean isInBloom : BOOL_VALUES) { - sm.updateBloomMetrics(isInBloom); - checkMetrics(); - } - - for (BlockCategory blockCat : BlockType.BlockCategory.values()) { - if (blockCat == BlockCategory.ALL_CATEGORIES) { - continue; - } - - for (boolean isCompaction : BOOL_VALUES) { - sm.updateOnCacheHit(blockCat, isCompaction); - checkMetrics(); - sm.updateOnCacheMiss(blockCat, isCompaction, rand.nextInt()); - checkMetrics(); - } - - for (boolean isEviction : BOOL_VALUES) { - sm.updateOnCachePutOrEvict(blockCat, (isEviction ? 
-1 : 1) - * rand.nextInt(1024 * 1024), isEviction); - } - } - } - } - } - - @Test - public void testGenerateSchemaMetricsPrefix() { - String tableName = "table1"; - int numCF = 3; - - StringBuilder expected = new StringBuilder(); - if (useTableName) { - expected.append("tbl."); - expected.append(tableName); - expected.append("."); - } - expected.append("cf."); - Set families = new HashSet(); - for (int i = 1; i <= numCF; i++) { - String cf = "cf" + i; - families.add(Bytes.toBytes(cf)); - expected.append(cf); - if (i == numCF) { - expected.append("."); - } else { - expected.append("~"); - } - } - - String result = SchemaMetrics.generateSchemaMetricsPrefix(tableName, - families); - assertEquals(expected.toString(), result); - } - - -} -
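
The tests in this patch all follow one pattern: resolve the hadoop1/hadoop2-specific MetricsAssertHelper through CompatibilityFactory, obtain a MetricsRegionServerSource (directly, or from a live region server), optionally force the wrapper to recompute, and assert on the published metric names. A minimal sketch of that pattern, using only classes added by the patch and the stub's canned values; the class name MetricsSourceSketch is hypothetical and the code is illustrative, not part of the change:

  import org.apache.hadoop.hbase.CompatibilityFactory;
  import org.apache.hadoop.hbase.regionserver.MetricsRegionServer;
  import org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource;
  import org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapperStub;
  import org.apache.hadoop.hbase.test.MetricsAssertHelper;

  public class MetricsSourceSketch {
    public static void main(String[] args) {
      // Resolve the hadoop1/hadoop2-specific helper through the compatibility SPI.
      MetricsAssertHelper helper = CompatibilityFactory.getInstance(MetricsAssertHelper.class);
      // Back the source with the wrapper stub so the reported values are deterministic.
      MetricsRegionServer rsm = new MetricsRegionServer(new MetricsRegionServerWrapperStub());
      MetricsRegionServerSource source = rsm.getMetricsSource();
      // Metric names and values match those asserted in TestMetricsRegionServer above.
      helper.assertGauge("regionCount", 101, source);
      helper.assertCounter("totalRequestCount", 899, source);
    }
  }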