diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java
index 24a4e320164..79acec02ebe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.hbase.io.hfile;
import org.codehaus.jackson.annotate.JsonIgnoreProperties;
-import com.yammer.metrics.core.Histogram;
-import com.yammer.metrics.stats.Snapshot;
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.Snapshot;
/**
* Snapshot of block cache age in cache.
@@ -28,11 +28,9 @@ import com.yammer.metrics.stats.Snapshot;
*/
@JsonIgnoreProperties({"ageHistogram", "snapshot"})
public class AgeSnapshot {
- private final Histogram ageHistogram;
private final Snapshot snapshot;
AgeSnapshot(final Histogram ageHistogram) {
- this.ageHistogram = ageHistogram;
this.snapshot = ageHistogram.getSnapshot();
}
@@ -57,18 +55,18 @@ public class AgeSnapshot {
}
public double getMean() {
- return this.ageHistogram.mean();
+ return this.snapshot.getMean();
}
public double getMax() {
- return ageHistogram.max();
+ return snapshot.getMax();
}
public double getMin() {
- return ageHistogram.min();
+ return snapshot.getMin();
}
public double getStdDev() {
- return ageHistogram.stdDev();
+ return snapshot.getStdDev();
}
}
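The AgeSnapshot hunk above is the central API shift in this patch: in metrics-core 2.x a Histogram exposed mean(), min(), max() and stdDev() directly, while in 3.x those statistics live on an immutable Snapshot obtained via getSnapshot(), with only the running count left on the histogram itself. A minimal sketch of the 3.1.2 read path (class name and values here are illustrative, not part of the patch):

    import com.codahale.metrics.Histogram;
    import com.codahale.metrics.Snapshot;
    import com.codahale.metrics.UniformReservoir;

    public class SnapshotReadExample {
      public static void main(String[] args) {
        // Histogram(Reservoir) is public in 3.x; UniformReservoir keeps a
        // fixed-size uniform sample of all updates.
        Histogram ages = new Histogram(new UniformReservoir(1028));
        for (long v = 1; v <= 100; v++) {
          ages.update(v);
        }
        Snapshot snapshot = ages.getSnapshot();  // point-in-time, immutable view
        System.out.println("mean=" + snapshot.getMean()   // double
            + ", min=" + snapshot.getMin()                // long in 3.x
            + ", max=" + snapshot.getMax()                // long in 3.x
            + ", stddev=" + snapshot.getStdDev());
        System.out.println("count=" + ages.getCount());   // count stays on the Histogram
      }
    }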
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
index 94638dacf3c..d81871f298f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
@@ -31,9 +31,11 @@ import org.codehaus.jackson.map.JsonMappingException;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.SerializationConfig;
-import com.yammer.metrics.core.Histogram;
-import com.yammer.metrics.core.MetricsRegistry;
-import com.yammer.metrics.stats.Snapshot;
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Snapshot;
+
+import static com.codahale.metrics.MetricRegistry.name;
/**
* Utility for aggregating counts in CachedBlocks and toString/toJSON CachedBlocks and BlockCaches.
@@ -44,7 +46,7 @@ public class BlockCacheUtil {
/**
* Needed making histograms.
*/
- private static final MetricsRegistry METRICS = new MetricsRegistry();
+ private static final MetricRegistry METRICS = new MetricRegistry();
/**
* Needed generating JSON.
@@ -189,7 +191,7 @@ public class BlockCacheUtil {
private final long now = System.nanoTime();
private final int max;
public static final int DEFAULT_MAX = 100000;
-
+
CachedBlocksByFile() {
this(null);
}
@@ -204,7 +206,7 @@ public class BlockCacheUtil {
*/
private NavigableMap<String, NavigableSet<CachedBlock>> cachedBlockByFile =
new ConcurrentSkipListMap<String, NavigableSet<CachedBlock>>();
- Histogram age = METRICS.newHistogram(CachedBlocksByFile.class, "age");
+ Histogram age = METRICS.histogram(name(CachedBlocksByFile.class, "age"));
/**
* @param cb
@@ -274,11 +276,11 @@ public class BlockCacheUtil {
@Override
public String toString() {
- Snapshot snapshot = this.age.getSnapshot();
- return "count=" + count + ", dataBlockCount=" + this.dataBlockCount + ", size=" + size +
+ Snapshot snapshot = age.getSnapshot();
+ return "count=" + count + ", dataBlockCount=" + dataBlockCount + ", size=" + size +
", dataSize=" + getDataSize() +
- ", mean age=" + this.age.mean() + ", stddev age=" + this.age.stdDev() +
- ", min age=" + this.age.min() + ", max age=" + this.age.max() +
+ ", mean age=" + snapshot.getMean() + ", stddev age=" + snapshot.getStdDev() +
+ ", min age=" + snapshot.getMin() + ", max age=" + snapshot.getMax() +
", 95th percentile age=" + snapshot.get95thPercentile() +
", 99th percentile age=" + snapshot.get99thPercentile();
}
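The BlockCacheUtil hunks also show the registration change: 2.x's MetricsRegistry.newHistogram(Class, String) took a class/name pair and built a MetricName, whereas 3.x's MetricRegistry keys every metric by a single dotted string and get-or-creates on lookup, with the static name() helper doing the joining. A sketch of the new idiom, under made-up names that are not part of the patch:

    import com.codahale.metrics.Histogram;
    import com.codahale.metrics.MetricRegistry;

    import static com.codahale.metrics.MetricRegistry.name;

    public class RegistryNamingExample {
      private static final MetricRegistry METRICS = new MetricRegistry();

      public static void main(String[] args) {
        // name(Class, String...) joins the fully-qualified class name and the
        // extra parts with '.', e.g. "RegistryNamingExample.age" under its package.
        Histogram age = METRICS.histogram(name(RegistryNamingExample.class, "age"));
        age.update(42L);

        // histogram(String) is get-or-create: the same key yields the same
        // instance instead of throwing or double-registering.
        Histogram again = METRICS.histogram(name(RegistryNamingExample.class, "age"));
        System.out.println("count=" + again.getCount() + ", same=" + (age == again));
      }
    }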
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
index fff6585aea5..50e8bbb8b54 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
@@ -22,8 +22,10 @@ import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import com.yammer.metrics.core.Histogram;
-import com.yammer.metrics.core.MetricsRegistry;
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.MetricRegistry;
+
+import static com.codahale.metrics.MetricRegistry.name;
/**
* Class that implements cache metrics.
@@ -33,7 +35,7 @@ public class CacheStats {
/**
* Needed making histograms.
*/
- private static final MetricsRegistry METRICS = new MetricsRegistry();
+ private static final MetricRegistry METRICS = new MetricRegistry();
/** Sliding window statistics. The number of metric periods to include in
* sliding window hit ratio calculations.
@@ -113,7 +115,7 @@ public class CacheStats {
this.hitCachingCounts = initializeZeros(numPeriodsInWindow);
this.requestCounts = initializeZeros(numPeriodsInWindow);
this.requestCachingCounts = initializeZeros(numPeriodsInWindow);
- this.ageAtEviction = METRICS.newHistogram(CacheStats.class, name + ".ageAtEviction");
+ this.ageAtEviction = METRICS.histogram(name(CacheStats.class, name + ".ageAtEviction"));
}
@Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
index 2818d88501f..86d183b86df 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
@@ -23,6 +23,7 @@ import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.IOException;
import java.io.PrintStream;
+import java.text.DateFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
@@ -32,6 +33,8 @@ import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
+import java.util.TimeZone;
+import java.util.concurrent.TimeUnit;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
@@ -41,6 +44,7 @@ import org.apache.commons.cli.OptionGroup;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
+import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -73,12 +77,18 @@ import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
-import com.yammer.metrics.core.Histogram;
-import com.yammer.metrics.core.Metric;
-import com.yammer.metrics.core.MetricName;
-import com.yammer.metrics.core.MetricPredicate;
-import com.yammer.metrics.core.MetricsRegistry;
-import com.yammer.metrics.reporting.ConsoleReporter;
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.Meter;
+import com.codahale.metrics.MetricFilter;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.ConsoleReporter;
+import com.codahale.metrics.ScheduledReporter;
+import com.codahale.metrics.Snapshot;
+import com.codahale.metrics.Timer;
+
+import static com.codahale.metrics.MetricRegistry.name;
/**
* Implements pretty-printing functionality for {@link HFile}s.
@@ -544,13 +554,17 @@ public class HFilePrettyPrinter extends Configured implements Tool {
}
private static class KeyValueStatsCollector {
- private final MetricsRegistry metricsRegistry = new MetricsRegistry();
+ private final MetricRegistry metricsRegistry = new MetricRegistry();
private final ByteArrayOutputStream metricsOutput = new ByteArrayOutputStream();
- private final SimpleReporter simpleReporter = new SimpleReporter(metricsRegistry, new PrintStream(metricsOutput));
- Histogram keyLen = metricsRegistry.newHistogram(HFilePrettyPrinter.class, "Key length");
- Histogram valLen = metricsRegistry.newHistogram(HFilePrettyPrinter.class, "Val length");
- Histogram rowSizeBytes = metricsRegistry.newHistogram(HFilePrettyPrinter.class, "Row size (bytes)");
- Histogram rowSizeCols = metricsRegistry.newHistogram(HFilePrettyPrinter.class, "Row size (columns)");
+ private final SimpleReporter simpleReporter = SimpleReporter.forRegistry(metricsRegistry).
+ outputTo(new PrintStream(metricsOutput)).filter(MetricFilter.ALL).build();
+
+ Histogram keyLen = metricsRegistry.histogram(name(HFilePrettyPrinter.class, "Key length"));
+ Histogram valLen = metricsRegistry.histogram(name(HFilePrettyPrinter.class, "Val length"));
+ Histogram rowSizeBytes = metricsRegistry.histogram(
+ name(HFilePrettyPrinter.class, "Row size (bytes)"));
+ Histogram rowSizeCols = metricsRegistry.histogram(
+ name(HFilePrettyPrinter.class, "Row size (columns)"));
long curRowBytes = 0;
long curRowCols = 0;
@@ -600,9 +614,8 @@ public class HFilePrettyPrinter extends Configured implements Tool {
return "no data available for statistics";
// Dump the metrics to the output stream
- simpleReporter.shutdown();
- simpleReporter.run();
- metricsRegistry.shutdown();
+ simpleReporter.stop();
+ simpleReporter.report();
return
metricsOutput.toString() +
@@ -610,35 +623,137 @@ public class HFilePrettyPrinter extends Configured implements Tool {
}
}
- private static class SimpleReporter extends ConsoleReporter {
- private final PrintStream out;
-
- public SimpleReporter(MetricsRegistry metricsRegistry, PrintStream out) {
- super(metricsRegistry, out, MetricPredicate.ALL);
- this.out = out;
+ /**
+ * Almost identical to ConsoleReporter, but extending ScheduledReporter,
+ * as extending ConsoleReporter in this version of dropwizard is now too much trouble.
+ */
+ private static class SimpleReporter extends ScheduledReporter {
+ /**
+ * Returns a new {@link Builder} for {@link SimpleReporter}.
+ *
+ * @param registry the registry to report
+ * @return a {@link Builder} instance for a {@link SimpleReporter}
+ */
+ public static Builder forRegistry(MetricRegistry registry) {
+ return new Builder(registry);
}
- @Override
- public void run() {
- for (Map.Entry<String, SortedMap<MetricName, Metric>> entry : getMetricsRegistry().groupedMetrics(
- MetricPredicate.ALL).entrySet()) {
- try {
- for (Map.Entry<MetricName, Metric> subEntry : entry.getValue().entrySet()) {
- out.print(" " + subEntry.getKey().getName());
- out.println(':');
+ /**
+ * A builder for {@link SimpleReporter} instances. Defaults to using the default locale and
+ * time zone, writing to {@code System.out}, converting rates to events/second, converting
+ * durations to milliseconds, and not filtering metrics.
+ */
+ public static class Builder {
+ private final MetricRegistry registry;
+ private PrintStream output;
+ private Locale locale;
+ private TimeZone timeZone;
+ private TimeUnit rateUnit;
+ private TimeUnit durationUnit;
+ private MetricFilter filter;
- subEntry.getValue().processWith(this, subEntry.getKey(), out);
- }
- } catch (Exception e) {
- e.printStackTrace(out);
- }
+ private Builder(MetricRegistry registry) {
+ this.registry = registry;
+ this.output = System.out;
+ this.locale = Locale.getDefault();
+ this.timeZone = TimeZone.getDefault();
+ this.rateUnit = TimeUnit.SECONDS;
+ this.durationUnit = TimeUnit.MILLISECONDS;
+ this.filter = MetricFilter.ALL;
+ }
+
+ /**
+ * Write to the given {@link PrintStream}.
+ *
+ * @param output a {@link PrintStream} instance.
+ * @return {@code this}
+ */
+ public Builder outputTo(PrintStream output) {
+ this.output = output;
+ return this;
+ }
+
+ /**
+ * Only report metrics which match the given filter.
+ *
+ * @param filter a {@link MetricFilter}
+ * @return {@code this}
+ */
+ public Builder filter(MetricFilter filter) {
+ this.filter = filter;
+ return this;
+ }
+
+ /**
+ * Builds a {@link SimpleReporter} with the given properties.
+ *
+ * @return a {@link SimpleReporter}
+ */
+ public SimpleReporter build() {
+ return new SimpleReporter(registry,
+ output,
+ locale,
+ timeZone,
+ rateUnit,
+ durationUnit,
+ filter);
}
}
+ private final PrintStream output;
+ private final Locale locale;
+ private final DateFormat dateFormat;
+
+ private SimpleReporter(MetricRegistry registry,
+ PrintStream output,
+ Locale locale,
+ TimeZone timeZone,
+ TimeUnit rateUnit,
+ TimeUnit durationUnit,
+ MetricFilter filter) {
+ super(registry, "simple-reporter", filter, rateUnit, durationUnit);
+ this.output = output;
+ this.locale = locale;
+
+ this.dateFormat = DateFormat.getDateTimeInstance(DateFormat.SHORT,
+ DateFormat.MEDIUM,
+ locale);
+ dateFormat.setTimeZone(timeZone);
+ }
+
@Override
- public void processHistogram(MetricName name, Histogram histogram, PrintStream stream) {
- super.processHistogram(name, histogram, stream);
- stream.printf(Locale.getDefault(), " count = %d%n", histogram.count());
+ public void report(SortedMap<String, Gauge> gauges,
+ SortedMap<String, Counter> counters,
+ SortedMap<String, Histogram> histograms,
+ SortedMap<String, Meter> meters,
+ SortedMap<String, Timer> timers) {
+ // we know we only have histograms
+ if (!histograms.isEmpty()) {
+ for (Map.Entry<String, Histogram> entry : histograms.entrySet()) {
+ output.print(" " + StringUtils.substringAfterLast(entry.getKey(), "."));
+ output.println(':');
+ printHistogram(entry.getValue());
+ }
+ output.println();
+ }
+
+ output.println();
+ output.flush();
+ }
+
+ private void printHistogram(Histogram histogram) {
+ Snapshot snapshot = histogram.getSnapshot();
+ output.printf(locale, " min = %d%n", snapshot.getMin());
+ output.printf(locale, " max = %d%n", snapshot.getMax());
+ output.printf(locale, " mean = %2.2f%n", snapshot.getMean());
+ output.printf(locale, " stddev = %2.2f%n", snapshot.getStdDev());
+ output.printf(locale, " median = %2.2f%n", snapshot.getMedian());
+ output.printf(locale, " 75%% <= %2.2f%n", snapshot.get75thPercentile());
+ output.printf(locale, " 95%% <= %2.2f%n", snapshot.get95thPercentile());
+ output.printf(locale, " 98%% <= %2.2f%n", snapshot.get98thPercentile());
+ output.printf(locale, " 99%% <= %2.2f%n", snapshot.get99thPercentile());
+ output.printf(locale, " 99.9%% <= %2.2f%n", snapshot.get999thPercentile());
+ output.printf(locale, " count = %d%n", histogram.getCount());
}
}
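Since ScheduledReporter replaces both the 2.x AbstractPollingReporter lifecycle (shutdown()/run()) and ConsoleReporter's old constructor API, the finish() path above now builds the reporter with a builder and drives it by hand: report() performs one synchronous pass over the registry, and stop() tears down the internal executor. The same one-shot pattern with the stock 3.1.2 ConsoleReporter, as a hedged stand-in for the private SimpleReporter:

    import com.codahale.metrics.ConsoleReporter;
    import com.codahale.metrics.MetricFilter;
    import com.codahale.metrics.MetricRegistry;

    public class OneShotReportExample {
      public static void main(String[] args) {
        MetricRegistry registry = new MetricRegistry();
        registry.histogram("Key length").update(27);

        ConsoleReporter reporter = ConsoleReporter.forRegistry(registry)
            .outputTo(System.out)
            .filter(MetricFilter.ALL)
            .build();

        // No start(period, unit) call: report() runs one synchronous dump,
        // which is all a batch tool like the pretty-printer needs.
        reporter.report();
        reporter.stop();  // releases the reporter's scheduling thread
      }
    }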
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index a48871fb0a0..cf5f7acf37c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -19,7 +19,7 @@
package org.apache.hadoop.hbase.mapreduce;
import com.google.protobuf.InvalidProtocolBufferException;
-import com.yammer.metrics.core.MetricsRegistry;
+import com.codahale.metrics.MetricRegistry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -339,7 +339,7 @@ public class TableMapReduceUtil {
if (addDependencyJars) {
addDependencyJars(job);
- addDependencyJars(job.getConfiguration(), MetricsRegistry.class);
+ addDependencyJars(job.getConfiguration(), MetricRegistry.class);
}
resetCacheConfig(job.getConfiguration());
@@ -785,7 +785,7 @@ public class TableMapReduceUtil {
com.google.protobuf.Message.class,
com.google.common.collect.Lists.class,
org.apache.htrace.Trace.class,
- com.yammer.metrics.core.MetricsRegistry.class);
+ com.codahale.metrics.MetricRegistry.class);
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java
index 120f1707de5..201bdf2f569 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java
@@ -18,9 +18,9 @@
*/
package org.apache.hadoop.hbase.util;
-import com.yammer.metrics.core.Histogram;
-import com.yammer.metrics.stats.Sample;
-import com.yammer.metrics.stats.Snapshot;
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.Reservoir;
+import com.codahale.metrics.Snapshot;
import java.lang.reflect.Constructor;
import java.text.DecimalFormat;
@@ -37,13 +37,13 @@ public final class YammerHistogramUtils {
private static DecimalFormat DOUBLE_FORMAT = new DecimalFormat("#0.00");
/**
- * Create a new {@link com.yammer.metrics.core.Histogram} instance. These constructors are
+ * Create a new {@link com.codahale.metrics.Histogram} instance. These constructors are
* not public in 2.2.0, so we use reflection to find them.
*/
- public static Histogram newHistogram(Sample sample) {
+ public static Histogram newHistogram(Reservoir sample) {
try {
Constructor<?> ctor =
- Histogram.class.getDeclaredConstructor(Sample.class);
+ Histogram.class.getDeclaredConstructor(Reservoir.class);
ctor.setAccessible(true);
return (Histogram) ctor.newInstance(sample);
} catch (Exception e) {
@@ -54,10 +54,10 @@ public final class YammerHistogramUtils {
/** @return an abbreviated summary of {@code hist}. */
public static String getShortHistogramReport(final Histogram hist) {
Snapshot sn = hist.getSnapshot();
- return "mean=" + DOUBLE_FORMAT.format(hist.mean()) +
- ", min=" + DOUBLE_FORMAT.format(hist.min()) +
- ", max=" + DOUBLE_FORMAT.format(hist.max()) +
- ", stdDev=" + DOUBLE_FORMAT.format(hist.stdDev()) +
+ return "mean=" + DOUBLE_FORMAT.format(sn.getMean()) +
+ ", min=" + DOUBLE_FORMAT.format(sn.getMin()) +
+ ", max=" + DOUBLE_FORMAT.format(sn.getMax()) +
+ ", stdDev=" + DOUBLE_FORMAT.format(sn.getStdDev()) +
", 95th=" + DOUBLE_FORMAT.format(sn.get95thPercentile()) +
", 99th=" + DOUBLE_FORMAT.format(sn.get99thPercentile());
}
@@ -65,10 +65,10 @@ public final class YammerHistogramUtils {
/** @return a summary of {@code hist}. */
public static String getHistogramReport(final Histogram hist) {
Snapshot sn = hist.getSnapshot();
- return ", mean=" + DOUBLE_FORMAT.format(hist.mean()) +
- ", min=" + DOUBLE_FORMAT.format(hist.min()) +
- ", max=" + DOUBLE_FORMAT.format(hist.max()) +
- ", stdDev=" + DOUBLE_FORMAT.format(hist.stdDev()) +
+ return ", mean=" + DOUBLE_FORMAT.format(sn.getMean()) +
+ ", min=" + DOUBLE_FORMAT.format(sn.getMin()) +
+ ", max=" + DOUBLE_FORMAT.format(sn.getMax()) +
+ ", stdDev=" + DOUBLE_FORMAT.format(sn.getStdDev()) +
", 50th=" + DOUBLE_FORMAT.format(sn.getMedian()) +
", 75th=" + DOUBLE_FORMAT.format(sn.get75thPercentile()) +
", 95th=" + DOUBLE_FORMAT.format(sn.get95thPercentile()) +
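One simplification this file stops short of: in metrics-core 3.x the Histogram(Reservoir) constructor is public, so the reflective lookup above (carried over from the 2.2.0 situation the javadoc mentions) is arguably no longer needed. A direct construction, shown as a possible follow-up rather than as part of the patch:

    import com.codahale.metrics.Histogram;
    import com.codahale.metrics.UniformReservoir;

    public class DirectHistogramExample {
      public static void main(String[] args) {
        // Public in 3.x, so no setAccessible(true) dance is required.
        Histogram hist = new Histogram(new UniformReservoir(1024 * 500));
        hist.update(123L);
        System.out.println("mean=" + hist.getSnapshot().getMean());
      }
    }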
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 33b50d41767..30629a35b85 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -93,9 +93,9 @@ import org.apache.htrace.impl.ProbabilitySampler;
import com.google.common.base.Objects;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import com.yammer.metrics.core.Histogram;
-import com.yammer.metrics.stats.Snapshot;
-import com.yammer.metrics.stats.UniformSample;
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.Snapshot;
+import com.codahale.metrics.UniformReservoir;
/**
* Script used evaluating HBase performance and scalability. Runs a HBase
@@ -1054,8 +1054,8 @@ public class PerformanceEvaluation extends Configured implements Tool {
this.connection = ConnectionFactory.createConnection(conf);
}
onStartup();
- latency = YammerHistogramUtils.newHistogram(new UniformSample(1024 * 500));
- valueSize = YammerHistogramUtils.newHistogram(new UniformSample(1024 * 500));
+ latency = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500));
+ valueSize = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500));
}
abstract void onStartup() throws IOException;
@@ -1121,21 +1121,21 @@ public class PerformanceEvaluation extends Configured implements Tool {
*/
private void reportLatency() throws IOException {
status.setStatus(testName + " latency log (microseconds), on " +
- latency.count() + " measures");
+ latency.getCount() + " measures");
reportHistogram(this.latency);
}
private void reportValueSize() throws IOException {
status.setStatus(testName + " valueSize after " +
- valueSize.count() + " measures");
+ valueSize.getCount() + " measures");
reportHistogram(this.valueSize);
}
private void reportHistogram(final Histogram h) throws IOException {
Snapshot sn = h.getSnapshot();
- status.setStatus(testName + " Min = " + h.min());
- status.setStatus(testName + " Avg = " + h.mean());
- status.setStatus(testName + " StdDev = " + h.stdDev());
+ status.setStatus(testName + " Min = " + sn.getMin());
+ status.setStatus(testName + " Avg = " + sn.getMean());
+ status.setStatus(testName + " StdDev = " + sn.getStdDev());
status.setStatus(testName + " 50th = " + sn.getMedian());
status.setStatus(testName + " 75th = " + sn.get75thPercentile());
status.setStatus(testName + " 95th = " + sn.get95thPercentile());
@@ -1143,7 +1143,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
status.setStatus(testName + " 99.9th = " + sn.get999thPercentile());
status.setStatus(testName + " 99.99th = " + sn.getValue(0.9999));
status.setStatus(testName + " 99.999th = " + sn.getValue(0.99999));
- status.setStatus(testName + " Max = " + h.max());
+ status.setStatus(testName + " Max = " + sn.getMax());
}
/**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java
index e35fc082c58..fd33fe30bf0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java
@@ -40,9 +40,9 @@ import org.junit.Ignore;
import org.junit.Test;
import org.junit.experimental.categories.Category;
-import com.yammer.metrics.core.Histogram;
-import com.yammer.metrics.stats.Snapshot;
-import com.yammer.metrics.stats.UniformSample;
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.Snapshot;
+import com.codahale.metrics.UniformReservoir;
@Category({MiscTests.class, SmallTests.class})
public class TestPerformanceEvaluation {
@@ -125,16 +125,16 @@ public class TestPerformanceEvaluation {
opts.setValueSize(valueSize);
RandomReadTest rrt = new RandomReadTest(null, opts, null);
Constructor<?> ctor =
- Histogram.class.getDeclaredConstructor(com.yammer.metrics.stats.Sample.class);
+ Histogram.class.getDeclaredConstructor(com.codahale.metrics.Reservoir.class);
ctor.setAccessible(true);
- Histogram histogram = (Histogram)ctor.newInstance(new UniformSample(1024 * 500));
+ Histogram histogram = (Histogram)ctor.newInstance(new UniformReservoir(1024 * 500));
for (int i = 0; i < 100; i++) {
histogram.update(rrt.getValueLength(null));
}
- double stddev = histogram.stdDev();
- assertTrue(stddev != 0 && stddev != 1.0);
- assertTrue(histogram.stdDev() != 0);
Snapshot snapshot = histogram.getSnapshot();
+ double stddev = snapshot.getStdDev();
+ assertTrue(stddev != 0 && stddev != 1.0);
+ assertTrue(snapshot.getStdDev() != 0);
double median = snapshot.getMedian();
assertTrue(median != 0 && median != 1 && median != valueSize);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java
index 1efbe05fac1..bba38f77afe 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java
@@ -150,16 +150,17 @@ public class TestClientPushback {
MetricsConnection.RegionStats rsStats = conn.getConnectionMetrics().
serverStats.get(server).get(regionName);
assertEquals(name, rsStats.name);
- assertEquals(rsStats.heapOccupancyHist.mean(),
+ assertEquals(rsStats.heapOccupancyHist.getSnapshot().getMean(),
(double)regionStats.getHeapOccupancyPercent(), 0.1 );
- assertEquals(rsStats.memstoreLoadHist.mean(),
+ assertEquals(rsStats.memstoreLoadHist.getSnapshot().getMean(),
(double)regionStats.getMemstoreLoadPercent(), 0.1);
MetricsConnection.RunnerStats runnerStats = conn.getConnectionMetrics().runnerStats;
- assertEquals(runnerStats.delayRunners.count(), 1);
- assertEquals(runnerStats.normalRunners.count(), 1);
- assertEquals("", runnerStats.delayIntevalHist.mean(), (double)backoffTime, 0.1);
+ assertEquals(runnerStats.delayRunners.getCount(), 1);
+ assertEquals(runnerStats.normalRunners.getCount(), 1);
+ assertEquals("", runnerStats.delayIntevalHist.getSnapshot().getMean(),
+ (double)backoffTime, 0.1);
latch.await(backoffTime * 2, TimeUnit.MILLISECONDS);
assertNotEquals("AsyncProcess did not submit the work time", endTime.get(), 0);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
index 7996c17a694..e138174aff0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
@@ -62,10 +62,11 @@ import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.impl.ProbabilitySampler;
-import com.yammer.metrics.core.Histogram;
-import com.yammer.metrics.core.Meter;
-import com.yammer.metrics.core.MetricsRegistry;
-import com.yammer.metrics.reporting.ConsoleReporter;
+import com.codahale.metrics.ConsoleReporter;
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.Meter;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.MetricFilter;
// imports for things that haven't moved from regionserver.wal yet.
import org.apache.hadoop.hbase.regionserver.wal.SecureProtobufLogReader;
@@ -73,6 +74,8 @@ import org.apache.hadoop.hbase.regionserver.wal.SecureProtobufLogWriter;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import static com.codahale.metrics.MetricRegistry.name;
+
/**
* This class runs performance benchmarks for {@link WAL}.
* See usage for this tool by running:
@@ -81,20 +84,18 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@InterfaceAudience.Private
public final class WALPerformanceEvaluation extends Configured implements Tool {
private static final Log LOG = LogFactory.getLog(WALPerformanceEvaluation.class.getName());
- private final MetricsRegistry metrics = new MetricsRegistry();
+ private final MetricRegistry metrics = new MetricRegistry();
private final Meter syncMeter =
- metrics.newMeter(WALPerformanceEvaluation.class, "syncMeter", "syncs", TimeUnit.MILLISECONDS);
- private final Histogram syncHistogram =
- metrics.newHistogram(WALPerformanceEvaluation.class, "syncHistogram", "nanos-between-syncs",
- true);
- private final Histogram syncCountHistogram =
- metrics.newHistogram(WALPerformanceEvaluation.class, "syncCountHistogram", "countPerSync",
- true);
- private final Meter appendMeter =
- metrics.newMeter(WALPerformanceEvaluation.class, "appendMeter", "bytes",
- TimeUnit.MILLISECONDS);
+ metrics.meter(name(WALPerformanceEvaluation.class, "syncMeter", "syncs"));
+
+ private final Histogram syncHistogram = metrics.histogram(
+ name(WALPerformanceEvaluation.class, "syncHistogram", "nanos-between-syncs"));
+ private final Histogram syncCountHistogram = metrics.histogram(
+ name(WALPerformanceEvaluation.class, "syncCountHistogram", "countPerSync"));
+ private final Meter appendMeter = metrics.meter(
+ name(WALPerformanceEvaluation.class, "appendMeter", "bytes"));
private final Histogram latencyHistogram =
- metrics.newHistogram(WALPerformanceEvaluation.class, "latencyHistogram", "nanos", true);
+ metrics.histogram(name(WALPerformanceEvaluation.class, "latencyHistogram", "nanos"));
private final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
@@ -333,7 +334,10 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
benchmarks[i] = Trace.wrap(new WALPutBenchmark(regions[i], htd, numIterations, noSync,
syncInterval, traceFreq));
}
- ConsoleReporter.enable(this.metrics, 30, TimeUnit.SECONDS);
+ ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics).
+ outputTo(System.out).convertRatesTo(TimeUnit.SECONDS).filter(MetricFilter.ALL).build();
+ reporter.start(30, TimeUnit.SECONDS);
+
long putTime = runBenchmark(benchmarks, numThreads);
logBenchmarkResult("Summary: threads=" + numThreads + ", iterations=" + numIterations +
", syncInterval=" + syncInterval, numIterations * numThreads, putTime);
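The WAL benchmark swaps the 2.x one-liner ConsoleReporter.enable(registry, 30, SECONDS) for the 3.x builder plus an explicit start(). A sketch of the full lifecycle, assuming metrics-core 3.1.2; stopping the reporter and forcing a final report() at the end of the run is hygiene the benchmark could add:

    import java.util.concurrent.TimeUnit;

    import com.codahale.metrics.ConsoleReporter;
    import com.codahale.metrics.MetricFilter;
    import com.codahale.metrics.MetricRegistry;

    public class PeriodicReportExample {
      public static void main(String[] args) throws InterruptedException {
        MetricRegistry metrics = new MetricRegistry();
        ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics)
            .outputTo(System.out)
            .convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS)
            .filter(MetricFilter.ALL)
            .build();
        reporter.start(30, TimeUnit.SECONDS);  // dump every metric every 30s
        try {
          metrics.meter("appendMeter").mark(1024);  // stand-in for benchmark work
          Thread.sleep(100);
        } finally {
          reporter.report();  // one last synchronous dump
          reporter.stop();
        }
      }
    }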
diff --git a/hbase-shaded/pom.xml b/hbase-shaded/pom.xml
index 4e38c881833..10edf428d95 100644
--- a/hbase-shaded/pom.xml
+++ b/hbase-shaded/pom.xml
@@ -154,8 +154,8 @@
<pattern>com.lmax</pattern>
<shadedPattern>org.apache.hadoop.hbase.shaded.com.lmax</shadedPattern>
</relocation>
<relocation>
- <pattern>com.yammer</pattern>
- <shadedPattern>org.apache.hadoop.hbase.shaded.com.yammer</shadedPattern>
+ <pattern>com.dropwizard</pattern>
+ <shadedPattern>org.apache.hadoop.hbase.shaded.com.dropwizard</shadedPattern>
</relocation>
diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml
index f80858bf718..7c3754e525b 100644
--- a/hbase-shell/pom.xml
+++ b/hbase-shell/pom.xml
@@ -235,7 +235,7 @@
- <groupId>com.yammer.metrics</groupId>
+ <groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-core</artifactId>
diff --git a/pom.xml b/pom.xml
index 0999120149c..d865b0ced91 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1181,7 +1181,7 @@
3.2.2
3.1
- <metrics-core.version>2.2.0</metrics-core.version>
+ <metrics-core.version>3.1.2</metrics-core.version>
12.0.1
1.3.9
1.9.13
@@ -1429,14 +1429,14 @@
<version>${log4j.version}</version>
</dependency>
- <!--This is not used by hbase directly. Used by thrift,
- yammer and zk.-->
+ <!--This is not used by hbase directly. Used by thrift,
+ dropwizard and zk.-->
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>${slf4j.version}</version>
</dependency>
<dependency>
- <groupId>com.yammer.metrics</groupId>
+ <groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-core</artifactId>
<version>${metrics-core.version}</version>
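Three distinct names are in play after this version bump, which is easy to trip over: the Maven groupId changes from com.yammer.metrics to io.dropwizard.metrics, the Java package changes from com.yammer.metrics to com.codahale.metrics, and ${metrics-core.version} moves from 2.2.0 to 3.1.2. Note that shade-plugin relocation <pattern> elements match Java package prefixes rather than Maven coordinates, so the com.dropwizard pattern in the hbase-shaded hunk above likely matches no classes in the new jar; com.codahale would be the prefix to relocate. Worth confirming before merge.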