getRange()[i]; i++);
- getRangeVals().incrementAndGet(i);
- }
@Override
- public void snapshot(MetricsRecordBuilder metricsRecordBuilder, boolean all) {
- if (all || changed()) {
- clearChanged();
- updateSnapshotMetrics(metricsRecordBuilder);
- updateSnapshotRangeMetrics(metricsRecordBuilder);
- }
+ public synchronized void snapshot(MetricsRecordBuilder metricsRecordBuilder, boolean all) {
+ // Get a reference to the old histogram.
+ FastLongHistogram histo = histogram.reset();
+ updateSnapshotMetrics(metricsRecordBuilder, histo);
+ updateSnapshotRangeMetrics(metricsRecordBuilder, histo);
}
-
- public void updateSnapshotRangeMetrics(MetricsRecordBuilder metricsRecordBuilder) {
- long prior = 0;
- for (int i = 0; i < getRange().length; i++) {
- long val = getRangeVals().get(i);
- if (val > 0) {
+
+ public void updateSnapshotRangeMetrics(MetricsRecordBuilder metricsRecordBuilder,
+ FastLongHistogram histogram) {
+ long priorRange = 0;
+ long cumNum = 0;
+
+ final long[] ranges = getRanges();
+ final String rangeType = getRangeType();
+ for (int i = 0; i < ranges.length - 1; i++) {
+ long val = histogram.getNumAtOrBelow(ranges[i]);
+ if (val - cumNum > 0) {
metricsRecordBuilder.addCounter(
- Interns.info(name + "_" + getRangeType() + "_" + prior + "-" + getRange()[i], desc), val);
+ Interns.info(name + "_" + rangeType + "_" + priorRange + "-" + ranges[i], desc),
+ val - cumNum);
}
- prior = getRange()[i];
+ priorRange = ranges[i];
+ cumNum = val;
}
- long val = getRangeVals().get(getRange().length);
- if (val > 0) {
+ long val = histogram.getCount();
+ if (val - cumNum > 0) {
metricsRecordBuilder.addCounter(
- Interns.info(name + "_" + getRangeType() + "_" + getRange()[getRange().length - 1] + "-inf", desc),
- getRangeVals().get(getRange().length));
+ Interns.info(name + "_" + rangeType + "_" + ranges[ranges.length - 1] + "-inf", desc),
+ val - cumNum);
}
}
}
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java
index 2f1d57a4d2d..38e78a2324e 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.metrics2.lib;
-import java.util.concurrent.atomic.AtomicLongArray;
-
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsInfo;
@@ -28,30 +26,29 @@ import org.apache.hadoop.metrics2.MetricsInfo;
*/
@InterfaceAudience.Private
public class MutableSizeHistogram extends MutableRangeHistogram {
- private final String rangeType = "SizeRangeCount";
- private final long[] ranges = {10,100,1000,10000,100000,1000000,10000000,100000000};
- private final AtomicLongArray rangeVals = new AtomicLongArray(getRange().length+1);
+ private final static String RANGE_TYPE = "SizeRangeCount";
+ private final static long[] RANGES = {10,100,1000,10000,100000,1000000,10000000,100000000};
public MutableSizeHistogram(MetricsInfo info) {
this(info.name(), info.description());
}
public MutableSizeHistogram(String name, String description) {
- super(name, description);
+ this(name, description, RANGES[RANGES.length-2]);
+ }
+
+ public MutableSizeHistogram(String name, String description, long expectedMax) {
+ super(name, description, expectedMax);
}
@Override
public String getRangeType() {
- return rangeType;
+ return RANGE_TYPE;
}
@Override
- public long[] getRange() {
- return ranges;
+ public long[] getRanges() {
+ return RANGES;
}
-
- @Override
- public AtomicLongArray getRangeVals() {
- return rangeVals;
- }
+
}
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java
index 32d4fae4e93..aaf4359f18c 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.metrics2.lib;
-import java.util.concurrent.atomic.AtomicLongArray;
-
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsInfo;
@@ -28,31 +26,30 @@ import org.apache.hadoop.metrics2.MetricsInfo;
*/
@InterfaceAudience.Private
public class MutableTimeHistogram extends MutableRangeHistogram {
- private final String rangeType = "TimeRangeCount";
- private final long[] ranges =
+ private final static String RANGE_TYPE = "TimeRangeCount";
+ private final static long[] RANGES =
{ 1, 3, 10, 30, 100, 300, 1000, 3000, 10000, 30000, 60000, 120000, 300000, 600000 };
- private final AtomicLongArray rangeVals = new AtomicLongArray(ranges.length+1);
public MutableTimeHistogram(MetricsInfo info) {
this(info.name(), info.description());
}
public MutableTimeHistogram(String name, String description) {
- super(name, description);
+ this(name, description, RANGES[RANGES.length - 2]);
+ }
+
+ public MutableTimeHistogram(String name, String description, long expectedMax) {
+ super(name, description, expectedMax);
}
@Override
public String getRangeType() {
- return rangeType;
+ return RANGE_TYPE;
}
@Override
- public long[] getRange() {
- return ranges;
+ public long[] getRanges() {
+ return RANGES;
}
- @Override
- public AtomicLongArray getRangeVals() {
- return rangeVals;
- }
}
diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java
index 7381fb9b870..2e374f7058a 100644
--- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java
+++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.metrics;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.testclassification.MetricsTests;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableFastCounter;
import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -72,9 +73,9 @@ public class TestBaseSourceImpl {
@Test
public void testIncCounters() throws Exception {
bmsi.incCounters("testinccounter", 100);
- assertEquals(100, ((MutableCounterLong) bmsi.metricsRegistry.get("testinccounter")).value());
+ assertEquals(100, ((MutableFastCounter) bmsi.metricsRegistry.get("testinccounter")).value());
bmsi.incCounters("testinccounter", 100);
- assertEquals(200, ((MutableCounterLong) bmsi.metricsRegistry.get("testinccounter")).value());
+ assertEquals(200, ((MutableFastCounter) bmsi.metricsRegistry.get("testinccounter")).value());
}
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
index 6986f129946..3dcd5e25f97 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
@@ -178,7 +178,6 @@ org.apache.hadoop.util.StringUtils;
AgeSnapshot ageAtEvictionSnapshot = bc.getStats().getAgeAtEvictionSnapshot();
// Only show if non-zero mean and stddev as is the case in combinedblockcache
double mean = ageAtEvictionSnapshot.getMean();
- double stddev = ageAtEvictionSnapshot.getStdDev();
%java>
Evicted |
@@ -197,13 +196,6 @@ org.apache.hadoop.util.StringUtils;
Mean age of Blocks at eviction time (seconds) |
%if>
-<%if stddev > 0 %>
-
- StdDev |
- <% String.format("%,d", (long)(ageAtEvictionSnapshot.getStdDev()/1000000)) %> |
- Standard Deviation for age of Blocks at eviction time |
-
-%if>
%def>
<%def hits_tmpl>
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon
index 523d1b9beba..fa55f6a04ba 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon
@@ -37,7 +37,6 @@ org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator;
org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.Bucket;
org.apache.hadoop.util.StringUtils;
-com.yammer.metrics.stats.Snapshot;
%import>
<%java>
BlockCache bc = cacheConfig == null ? null : cacheConfig.getBlockCache();
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
index ed0926fd321..e04756dd81d 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
@@ -34,7 +34,6 @@ org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
org.apache.hadoop.hbase.util.DirectMemoryUtils;
org.apache.hadoop.util.StringUtils;
-com.yammer.metrics.stats.Snapshot;
java.lang.management.ManagementFactory;
%import>
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java
index 24a4e320164..4c1ad23e46c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java
@@ -17,58 +17,54 @@
*/
package org.apache.hadoop.hbase.io.hfile;
+import org.apache.hadoop.hbase.util.FastLongHistogram;
import org.codehaus.jackson.annotate.JsonIgnoreProperties;
-import com.yammer.metrics.core.Histogram;
-import com.yammer.metrics.stats.Snapshot;
-
/**
* Snapshot of block cache age in cache.
* This object is preferred because we can control how it is serialized out when JSON'ing.
*/
@JsonIgnoreProperties({"ageHistogram", "snapshot"})
public class AgeSnapshot {
- private final Histogram ageHistogram;
- private final Snapshot snapshot;
- AgeSnapshot(final Histogram ageHistogram) {
+ private final FastLongHistogram ageHistogram;
+ private final long[] quantiles;
+
+ AgeSnapshot(final FastLongHistogram ageHistogram) {
this.ageHistogram = ageHistogram;
- this.snapshot = ageHistogram.getSnapshot();
+ this.quantiles = ageHistogram.getQuantiles(new double[]{0.75, 0.95, 0.98, 0.99, 0.999});
}
public double get75thPercentile() {
- return snapshot.get75thPercentile();
+ return quantiles[0];
}
public double get95thPercentile() {
- return snapshot.get95thPercentile();
+ return quantiles[1];
}
public double get98thPercentile() {
- return snapshot.get98thPercentile();
- }
-
- public double get999thPercentile() {
- return snapshot.get999thPercentile();
+ return quantiles[2];
}
public double get99thPercentile() {
- return snapshot.get99thPercentile();
+ return quantiles[3];
}
+ public double get999thPercentile() {
+ return quantiles[4];
+ }
+
+
public double getMean() {
- return this.ageHistogram.mean();
+ return this.ageHistogram.getMean();
}
public double getMax() {
- return ageHistogram.max();
+ return this.ageHistogram.getMax();
}
public double getMin() {
- return ageHistogram.min();
- }
-
- public double getStdDev() {
- return ageHistogram.stdDev();
+ return this.ageHistogram.getMin();
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
index 94638dacf3c..ff67337b32b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
@@ -25,27 +25,19 @@ import java.util.concurrent.ConcurrentSkipListSet;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.util.FastLongHistogram;
import org.codehaus.jackson.JsonGenerationException;
import org.codehaus.jackson.annotate.JsonIgnoreProperties;
import org.codehaus.jackson.map.JsonMappingException;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.SerializationConfig;
-import com.yammer.metrics.core.Histogram;
-import com.yammer.metrics.core.MetricsRegistry;
-import com.yammer.metrics.stats.Snapshot;
-
/**
* Utilty for aggregating counts in CachedBlocks and toString/toJSON CachedBlocks and BlockCaches.
* No attempt has been made at making this thread safe.
*/
@InterfaceAudience.Private
public class BlockCacheUtil {
- /**
- * Needed making histograms.
- */
- private static final MetricsRegistry METRICS = new MetricsRegistry();
-
/**
* Needed generating JSON.
*/
@@ -204,7 +196,7 @@ public class BlockCacheUtil {
*/
private NavigableMap> cachedBlockByFile =
new ConcurrentSkipListMap>();
- Histogram age = METRICS.newHistogram(CachedBlocksByFile.class, "age");
+ FastLongHistogram hist = new FastLongHistogram();
/**
* @param cb
@@ -226,7 +218,7 @@ public class BlockCacheUtil {
this.dataSize += cb.getSize();
}
long age = this.now - cb.getCachedTime();
- this.age.update(age);
+ this.hist.add(age, 1);
return false;
}
@@ -269,18 +261,22 @@ public class BlockCacheUtil {
}
public AgeSnapshot getAgeInCacheSnapshot() {
- return new AgeSnapshot(this.age);
+ return new AgeSnapshot(this.hist);
}
@Override
public String toString() {
- Snapshot snapshot = this.age.getSnapshot();
- return "count=" + count + ", dataBlockCount=" + this.dataBlockCount + ", size=" + size +
+ AgeSnapshot snapshot = getAgeInCacheSnapshot();
+ return "count=" + count + ", dataBlockCount=" + dataBlockCount + ", size=" + size +
", dataSize=" + getDataSize() +
- ", mean age=" + this.age.mean() + ", stddev age=" + this.age.stdDev() +
- ", min age=" + this.age.min() + ", max age=" + this.age.max() +
- ", 95th percentile age=" + snapshot.get95thPercentile() +
- ", 99th percentile age=" + snapshot.get99thPercentile();
+ ", mean age=" + snapshot.getMean() +
+ ", min age=" + snapshot.getMin() +
+ ", max age=" + snapshot.getMax() +
+ ", 75th percentile age=" + snapshot.get75thPercentile() +
+ ", 95th percentile age=" + snapshot.get95thPercentile() +
+ ", 98th percentile age=" + snapshot.get98thPercentile() +
+ ", 99th percentile age=" + snapshot.get99thPercentile() +
+      ", 99.9th percentile age=" + snapshot.get999thPercentile();
}
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
index fff6585aea5..b2a0d09a1e0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
@@ -22,18 +22,14 @@ import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import com.yammer.metrics.core.Histogram;
-import com.yammer.metrics.core.MetricsRegistry;
+import org.apache.hadoop.hbase.util.Counter;
+import org.apache.hadoop.hbase.util.FastLongHistogram;
/**
* Class that implements cache metrics.
*/
@InterfaceAudience.Private
public class CacheStats {
- /**
- * Needed making histograms.
- */
- private static final MetricsRegistry METRICS = new MetricsRegistry();
/** Sliding window statistics. The number of metric periods to include in
* sliding window hit ratio calculations.
@@ -41,10 +37,10 @@ public class CacheStats {
static final int DEFAULT_WINDOW_PERIODS = 5;
/** The number of getBlock requests that were cache hits */
- private final AtomicLong hitCount = new AtomicLong(0);
+ private final Counter hitCount = new Counter();
/** The number of getBlock requests that were cache hits from primary replica */
- private final AtomicLong primaryHitCount = new AtomicLong(0);
+ private final Counter primaryHitCount = new Counter();
/**
* The number of getBlock requests that were cache hits, but only from
@@ -52,27 +48,27 @@ public class CacheStats {
* attempt to read from the block cache even if they will not put new blocks
* into the block cache. See HBASE-2253 for more information.
*/
- private final AtomicLong hitCachingCount = new AtomicLong(0);
+ private final Counter hitCachingCount = new Counter();
/** The number of getBlock requests that were cache misses */
- private final AtomicLong missCount = new AtomicLong(0);
+ private final Counter missCount = new Counter();
/** The number of getBlock requests for primary replica that were cache misses */
- private final AtomicLong primaryMissCount = new AtomicLong(0);
+ private final Counter primaryMissCount = new Counter();
/**
* The number of getBlock requests that were cache misses, but only from
* requests that were set to use the block cache.
*/
- private final AtomicLong missCachingCount = new AtomicLong(0);
+ private final Counter missCachingCount = new Counter();
/** The number of times an eviction has occurred */
- private final AtomicLong evictionCount = new AtomicLong(0);
+ private final Counter evictionCount = new Counter();
/** The total number of blocks that have been evicted */
- private final AtomicLong evictedBlockCount = new AtomicLong(0);
+ private final Counter evictedBlockCount = new Counter();
/** The total number of blocks for primary replica that have been evicted */
- private final AtomicLong primaryEvictedBlockCount = new AtomicLong(0);
+ private final Counter primaryEvictedBlockCount = new Counter();
/** The total number of blocks that were not inserted. */
private final AtomicLong failedInserts = new AtomicLong(0);
@@ -100,7 +96,7 @@ public class CacheStats {
/**
* Keep running age at eviction time
*/
- private Histogram ageAtEviction;
+ private FastLongHistogram ageAtEviction;
private long startTime = System.nanoTime();
public CacheStats(final String name) {
@@ -113,7 +109,7 @@ public class CacheStats {
this.hitCachingCounts = initializeZeros(numPeriodsInWindow);
this.requestCounts = initializeZeros(numPeriodsInWindow);
this.requestCachingCounts = initializeZeros(numPeriodsInWindow);
- this.ageAtEviction = METRICS.newHistogram(CacheStats.class, name + ".ageAtEviction");
+ this.ageAtEviction = new FastLongHistogram();
}
@Override
@@ -125,14 +121,13 @@ public class CacheStats {
", evictedBlockCount=" + getEvictedCount() +
", primaryMissCount=" + getPrimaryMissCount() +
", primaryHitCount=" + getPrimaryHitCount() +
- ", evictedAgeMean=" + snapshot.getMean() +
- ", evictedAgeStdDev=" + snapshot.getStdDev();
+ ", evictedAgeMean=" + snapshot.getMean();
}
public void miss(boolean caching, boolean primary) {
- missCount.incrementAndGet();
- if (primary) primaryMissCount.incrementAndGet();
- if (caching) missCachingCount.incrementAndGet();
+ missCount.increment();
+ if (primary) primaryMissCount.increment();
+ if (caching) missCachingCount.increment();
}
public void hit(boolean caching) {
@@ -140,20 +135,20 @@ public class CacheStats {
}
public void hit(boolean caching, boolean primary) {
- hitCount.incrementAndGet();
- if (primary) primaryHitCount.incrementAndGet();
- if (caching) hitCachingCount.incrementAndGet();
+ hitCount.increment();
+ if (primary) primaryHitCount.increment();
+ if (caching) hitCachingCount.increment();
}
public void evict() {
- evictionCount.incrementAndGet();
+ evictionCount.increment();
}
public void evicted(final long t, boolean primary) {
- if (t > this.startTime) this.ageAtEviction.update(t - this.startTime);
- this.evictedBlockCount.incrementAndGet();
+ if (t > this.startTime) this.ageAtEviction.add(t - this.startTime,1);
+ this.evictedBlockCount.increment();
if (primary) {
- primaryEvictedBlockCount.incrementAndGet();
+ primaryEvictedBlockCount.increment();
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index a67bf8c06c3..3fb9554112e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -35,7 +35,6 @@ import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
-import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -62,6 +61,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HFileProtos;
import org.apache.hadoop.hbase.util.BloomFilterWriter;
import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Counter;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.io.Writable;
@@ -179,17 +179,19 @@ public class HFile {
*/
public static final int DEFAULT_BYTES_PER_CHECKSUM = 16 * 1024;
// For measuring number of checksum failures
- static final AtomicLong checksumFailures = new AtomicLong();
+ static final Counter checksumFailures = new Counter();
// for test purpose
- public static final AtomicLong dataBlockReadCnt = new AtomicLong(0);
+ public static final Counter dataBlockReadCnt = new Counter();
/**
* Number of checksum verification failures. It also
* clears the counter.
*/
public static final long getChecksumFailuresCount() {
- return checksumFailures.getAndSet(0);
+ long count = checksumFailures.get();
+ checksumFailures.set(0);
+ return count;
}
/** API required to write an {@link HFile} */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index edccfb59dbe..6cd7b20a1dc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -1523,7 +1523,7 @@ public class HFileBlock implements Cacheable {
HFile.LOG.warn(msg);
throw new IOException(msg); // cannot happen case here
}
- HFile.checksumFailures.incrementAndGet(); // update metrics
+ HFile.checksumFailures.increment(); // update metrics
// If we have a checksum failure, we fall back into a mode where
// the next few reads use HDFS level checksums. We aim to make the
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
index a1b4c340d69..50200dc123b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
@@ -417,7 +417,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
assert cachedBlock.isUnpacked() : "Packed block leak.";
if (cachedBlock.getBlockType().isData()) {
if (updateCacheMetrics) {
- HFile.dataBlockReadCnt.incrementAndGet();
+ HFile.dataBlockReadCnt.increment();
}
// Validate encoding type for data blocks. We include encoding
// type in the cache key, and we expect it to match on a cache hit.
@@ -456,7 +456,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
}
if (updateCacheMetrics && hfileBlock.getBlockType().isData()) {
- HFile.dataBlockReadCnt.incrementAndGet();
+ HFile.dataBlockReadCnt.increment();
}
return unpacked;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java
index 51e6268fc93..ec1d4d9f683 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java
@@ -18,10 +18,9 @@
*/
package org.apache.hadoop.hbase.io.hfile.bucket;
-import java.util.concurrent.atomic.AtomicLong;
-
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.io.hfile.CacheStats;
+import org.apache.hadoop.hbase.util.Counter;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
@@ -29,8 +28,8 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
*/
@InterfaceAudience.Private
public class BucketCacheStats extends CacheStats {
- private final AtomicLong ioHitCount = new AtomicLong(0);
- private final AtomicLong ioHitTime = new AtomicLong(0);
+ private final Counter ioHitCount = new Counter(0);
+ private final Counter ioHitTime = new Counter(0);
private final static int nanoTime = 1000000;
private long lastLogTime = EnvironmentEdgeManager.currentTime();
@@ -45,8 +44,8 @@ public class BucketCacheStats extends CacheStats {
}
public void ioHit(long time) {
- ioHitCount.incrementAndGet();
- ioHitTime.addAndGet(time);
+ ioHitCount.increment();
+ ioHitTime.add(time);
}
public long getIOHitsPerSecond() {