+ * <p>This class is usually preferable to {@link AtomicLong} when
+ * multiple threads update a common sum that is used for purposes such
+ * as collecting statistics, not for fine-grained synchronization
+ * control. Under low update contention, the two classes have similar
+ * characteristics. But under high contention, expected throughput of
+ * this class is significantly higher, at the expense of higher space
+ * consumption.
+ *
+ * <p>This class extends {@link Number}, but does not define
+ * methods such as {@code equals}, {@code hashCode} and {@code
+ * compareTo} because instances are expected to be mutated, and so are
+ * not useful as collection keys.
+ *
+ * <p><em>jsr166e note: This class is targeted to be placed in
+ * java.util.concurrent.atomic.
+ *
+ * @since 1.8
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class LongAdder extends Striped64 implements Serializable {
+    private static final long serialVersionUID = 7249069246863182397L;
+
+    /**
+     * Version of plus for use in retryUpdate
+     */
+    final long fn(long v, long x) { return v + x; }
+
+    /**
+     * Creates a new adder with initial sum of zero.
+     */
+    public LongAdder() {
+    }
+
+    /**
+     * Adds the given value.
+     *
+     * @param x the value to add
+     */
+    public void add(long x) {
+        Cell[] as; long b, v; int[] hc; Cell a; int n;
+        if ((as = cells) != null || !casBase(b = base, b + x)) { // fast path: CAS base while uncontended
+            boolean uncontended = true;
+            if ((hc = threadHashCode.get()) == null ||
+                as == null || (n = as.length) < 1 ||
+                (a = as[(n - 1) & hc[0]]) == null ||
+                !(uncontended = a.cas(v = a.value, v + x))) // try this thread's hashed cell
+                retryUpdate(x, hc, uncontended); // Striped64 slow path: init/grow table, rehash
+        }
+    }
+
+    /**
+     * Equivalent to {@code add(1)}.
+     */
+    public void increment() {
+        add(1L);
+    }
+
+    /**
+     * Equivalent to {@code add(-1)}.
+     */
+    public void decrement() {
+        add(-1L);
+    }
+
+    /**
+     * Returns the current sum. The returned value is NOT an
+     * atomic snapshot; invocation in the absence of concurrent
+     * updates returns an accurate result, but concurrent updates that
+     * occur while the sum is being calculated might not be
+     * incorporated.
+     *
+     * @return the sum
+     */
+    public long sum() {
+        long sum = base;
+        Cell[] as = cells;
+        if (as != null) {
+            int n = as.length;
+            for (int i = 0; i < n; ++i) {
+                Cell a = as[i];
+                if (a != null)
+                    sum += a.value; // cells may be concurrently updated; result is best-effort
+            }
+        }
+        return sum;
+    }
+
+    /**
+     * Resets variables maintaining the sum to zero. This method may
+     * be a useful alternative to creating a new adder, but is only
+     * effective if there are no concurrent updates. Because this
+     * method is intrinsically racy, it should only be used when it is
+     * known that no threads are concurrently updating.
+     */
+    public void reset() {
+        internalReset(0L); // Striped64: zero base and every cell
+    }
+
+    /**
+     * Equivalent in effect to {@link #sum} followed by {@link
+     * #reset}. This method may apply for example during quiescent
+     * points between multithreaded computations. If there are
+     * updates concurrent with this method, the returned value is
+     * not guaranteed to be the final value occurring before
+     * the reset.
+     *
+     * @return the sum
+     */
+    public long sumThenReset() {
+        long sum = base;
+        Cell[] as = cells;
+        base = 0L; // racy with concurrent adds; see method javadoc
+        if (as != null) {
+            int n = as.length;
+            for (int i = 0; i < n; ++i) {
+                Cell a = as[i];
+                if (a != null) {
+                    sum += a.value;
+                    a.value = 0L; // read-then-zero is not atomic per cell
+                }
+            }
+        }
+        return sum;
+    }
+
+    /**
+     * Returns the String representation of the {@link #sum}.
+     * @return the String representation of the {@link #sum}
+     */
+    public String toString() {
+        return Long.toString(sum());
+    }
+
+    /**
+     * Equivalent to {@link #sum}.
+     *
+     * @return the sum
+     */
+    public long longValue() {
+        return sum();
+    }
+
+    /**
+     * Returns the {@link #sum} as an {@code int} after a narrowing
+     * primitive conversion.
+     */
+    public int intValue() {
+        return (int)sum();
+    }
+
+    /**
+     * Returns the {@link #sum} as a {@code float}
+     * after a widening primitive conversion.
+     */
+    public float floatValue() {
+        return (float)sum();
+    }
+
+    /**
+     * Returns the {@link #sum} as a {@code double} after a widening
+     * primitive conversion.
+     */
+    public double doubleValue() {
+        return (double)sum();
+    }
+
+    private void writeObject(ObjectOutputStream s) throws IOException {
+        s.defaultWriteObject();
+        s.writeLong(sum()); // serialize only the aggregated sum
+    }
+
+    private void readObject(ObjectInputStream s)
+        throws IOException, ClassNotFoundException {
+        s.defaultReadObject();
+        busy = 0; // reset Striped64 spinlock state
+        cells = null;
+        base = s.readLong(); // restore sum into base; cells rebuilt lazily on contention
+    }
+
+}
\ No newline at end of file
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Striped64.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Striped64.java
new file mode 100644
index 00000000000..36f2fce59f9
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Striped64.java
@@ -0,0 +1,356 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package org.apache.hadoop.hbase.util;
+
+import java.util.Random;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * A package-local class holding common representation and mechanics
+ * for classes supporting dynamic striping on 64bit values. The class
+ * extends Number so that concrete subclasses must publicly do so.
+ */
+@InterfaceAudience.Private
+abstract class Striped64 extends Number {
+ /*
+ * This class maintains a lazily-initialized table of atomically
+ * updated variables, plus an extra "base" field. The table size
+ * is a power of two. Indexing uses masked per-thread hash codes.
+ * Nearly all declarations in this class are package-private,
+ * accessed directly by subclasses.
+ *
+ * Table entries are of class Cell; a variant of AtomicLong padded
+ * to reduce cache contention on most processors. Padding is
+ * overkill for most Atomics because they are usually irregularly
+ * scattered in memory and thus don't interfere much with each
+ * other. But Atomic objects residing in arrays will tend to be
+ * placed adjacent to each other, and so will most often share
+ * cache lines (with a huge negative performance impact) without
+ * this precaution.
+ *
+ * In part because Cells are relatively large, we avoid creating
+ * them until they are needed. When there is no contention, all
+ * updates are made to the base field. Upon first contention (a
+ * failed CAS on base update), the table is initialized to size 2.
+ * The table size is doubled upon further contention until
+ * reaching the nearest power of two greater than or equal to the
+ * number of CPUS. Table slots remain empty (null) until they are
+ * needed.
+ *
+ * A single spinlock ("busy") is used for initializing and
+ * resizing the table, as well as populating slots with new Cells.
+ * There is no need for a blocking lock; when the lock is not
+ * available, threads try other slots (or the base). During these
+ * retries, there is increased contention and reduced locality,
+ * which is still better than alternatives.
+ *
+ * Per-thread hash codes are initialized to random values.
+ * Contention and/or table collisions are indicated by failed
+ * CASes when performing an update operation (see method
+ * retryUpdate). Upon a collision, if the table size is less than
+ * the capacity, it is doubled in size unless some other thread
+ * holds the lock. If a hashed slot is empty, and lock is
+ * available, a new Cell is created. Otherwise, if the slot
+ * exists, a CAS is tried. Retries proceed by "double hashing",
+ * using a secondary hash (Marsaglia XorShift) to try to find a
+ * free slot.
+ *
+ * The table size is capped because, when there are more threads
+ * than CPUs, supposing that each thread were bound to a CPU,
+ * there would exist a perfect hash function mapping threads to
+ * slots that eliminates collisions. When we reach capacity, we
+ * search for this mapping by randomly varying the hash codes of
+ * colliding threads. Because search is random, and collisions
+ * only become known via CAS failures, convergence can be slow,
+ * and because threads are typically not bound to CPUS forever,
+ * may not occur at all. However, despite these limitations,
+ * observed contention rates are typically low in these cases.
+ *
+ * It is possible for a Cell to become unused when threads that
+ * once hashed to it terminate, as well as in the case where
+ * doubling the table causes no thread to hash to it under
+ * expanded mask. We do not try to detect or remove such cells,
+ * under the assumption that for long-running instances, observed
+ * contention levels will recur, so the cells will eventually be
+ * needed again; and for short-lived ones, it does not matter.
+ */
+
+ /**
+ * Padded variant of AtomicLong supporting only raw accesses plus CAS.
+ * The value field is placed between pads, hoping that the JVM doesn't
+ * reorder them.
+ *
+ * JVM intrinsics note: It would be possible to use a release-only
+ * form of CAS here, if it were provided.
+ */
+ static final class Cell {
+ volatile long p0, p1, p2, p3, p4, p5, p6;
+ volatile long value;
+ volatile long q0, q1, q2, q3, q4, q5, q6;
+ Cell(long x) { value = x; }
+
+ final boolean cas(long cmp, long val) {
+ return UNSAFE.compareAndSwapLong(this, valueOffset, cmp, val);
+ }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe UNSAFE;
+ private static final long valueOffset;
+ static {
+ try {
+ UNSAFE = getUnsafe();
+ Class> ak = Cell.class;
+ valueOffset = UNSAFE.objectFieldOffset
+ (ak.getDeclaredField("value"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+
+ }
+
+ /**
+ * ThreadLocal holding a single-slot int array holding hash code.
+ * Unlike the JDK8 version of this class, we use a suboptimal
+ * int[] representation to avoid introducing a new type that can
+ * impede class-unloading when ThreadLocals are not removed.
+ */
+ static final ThreadLocal
+ * These metrics will be available through the regular Hadoop metrics2 sinks (ganglia, opentsdb,
+ * etc) as well as JMX output. You can view a snapshot of the metrics by going to the http web UI
+ * of the master page, something like http://mymasterhost:16010/jmx
+ *
+ * These metrics will be available through the regular Hadoop metrics2 sinks (ganglia, opentsdb,
+ * etc) as well as JMX output. You can view a snapshot of the metrics by going to the http web UI
+ * of the regionserver page, something like http://myregionserverhost:16030/jmx
+ *
Developer note: + * Unlike the current metrics2 based approach, the new metrics approach + * (hbase-metrics-api and hbase-metrics modules) work by having different MetricRegistries that are + * initialized and used from the code that lives in their respective modules (hbase-server, etc). + * There is no need to define BaseSource classes and do a lot of indirection. The MetricRegistry'es + * will be in the global MetricRegistriesImpl, and this class will iterate over + * MetricRegistries.global() and register adapters to the metrics2 subsystem. These adapters then + * report the actual values by delegating to + * {@link HBaseMetrics2HadoopMetricsAdapter#snapshotAllMetrics(MetricRegistry, MetricsCollector)}. + * + * We do not initialize the Hadoop Metrics2 system assuming that other BaseSources already do so + * (see BaseSourceImpl). Once the last BaseSource is moved to the new system, the metric2 + * initialization should be moved here. + *
+ */ +public class GlobalMetricRegistriesAdapter { + + private static final Log LOG = LogFactory.getLog(GlobalMetricRegistriesAdapter.class); + + private class MetricsSourceAdapter implements MetricsSource { + private final MetricRegistry registry; + MetricsSourceAdapter(MetricRegistry registry) { + this.registry = registry; + } + + @Override + public void getMetrics(MetricsCollector collector, boolean all) { + metricsAdapter.snapshotAllMetrics(registry, collector); + } + } + + private final MetricsExecutor executor; + private final AtomicBoolean stopped; + private final DefaultMetricsSystemHelper helper; + private final HBaseMetrics2HadoopMetricsAdapter metricsAdapter; + private final HashMapThe result is maven-compiler-plugin can properly identify the scope of + * changed files + * + *
See more details in
+ *
+ * maven-compiler-plugin: incremental compilation broken
+ */
+@InterfaceAudience.Private
+@Retention(RetentionPolicy.SOURCE)
+public @interface PackageMarker {
+}
diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Snapshot.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Snapshot.java
new file mode 100644
index 00000000000..56ee8ae0e7b
--- /dev/null
+++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Snapshot.java
@@ -0,0 +1,136 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.hadoop.hbase.metrics;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * A statistical sample of histogram values.
+ */
+@InterfaceAudience.Private
+public interface Snapshot {
+
+  /**
+   * Return the values with the given quantiles.
+   * @param quantiles the requested quantiles.
+   * @return the value for the quantiles.
+   */
+  long[] getQuantiles(double[] quantiles);
+
+  /**
+   * Return the values with the default quantiles.
+   * @return the values for the default quantiles.
+   */
+  long[] getQuantiles();
+
+  /**
+   * Returns the number of values in the snapshot.
+   *
+   * @return the number of values
+   */
+  long getCount();
+
+  /**
+   * Returns the total count at or below the given value.
+   * @param val the value
+   * @return the total count at or below the given value
+   */
+  long getCountAtOrBelow(long val);
+
+  /**
+   * Returns the value at the 25th percentile in the distribution.
+   *
+   * @return the value at the 25th percentile
+   */
+  long get25thPercentile();
+
+  /**
+   * Returns the value at the 75th percentile in the distribution.
+   *
+   * @return the value at the 75th percentile
+   */
+  long get75thPercentile();
+
+  /**
+   * Returns the value at the 90th percentile in the distribution.
+   *
+   * @return the value at the 90th percentile
+   */
+  long get90thPercentile();
+
+  /**
+   * Returns the value at the 95th percentile in the distribution.
+   *
+   * @return the value at the 95th percentile
+   */
+  long get95thPercentile();
+
+  /**
+   * Returns the value at the 98th percentile in the distribution.
+   *
+   * @return the value at the 98th percentile
+   */
+  long get98thPercentile();
+
+  /**
+   * Returns the value at the 99th percentile in the distribution.
+   *
+   * @return the value at the 99th percentile
+   */
+  long get99thPercentile();
+
+  /**
+   * Returns the value at the 99.9th percentile in the distribution.
+   *
+   * @return the value at the 99.9th percentile
+   */
+  long get999thPercentile();
+
+  /**
+   * Returns the median value in the distribution.
+   *
+   * @return the median value
+   */
+  long getMedian();
+
+  /**
+   * Returns the highest value in the snapshot.
+   *
+   * @return the highest value
+   */
+  long getMax();
+
+  /**
+   * Returns the arithmetic mean of the values in the snapshot.
+   *
+   * @return the arithmetic mean
+   */
+  long getMean();
+
+  /**
+   * Returns the lowest value in the snapshot.
+   *
+   * @return the lowest value
+   */
+  long getMin();
+
+  // TODO: Dropwizard histograms also track stddev
+}
diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Timer.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Timer.java
new file mode 100644
index 00000000000..4fcb636d494
--- /dev/null
+++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Timer.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.metrics;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * A metric which encompasses a {@link Histogram} and {@link Meter}.
+ */
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
+@InterfaceStability.Evolving
+public interface Timer extends Metric {
+  /**
+   * Update the timer with the given duration in the given time unit.
+   * @param duration the duration of the event
+   * @param unit the time unit for the duration
+   */
+  void update(long duration, TimeUnit unit);
+
+  /**
+   * Update the timer with the given duration in milliseconds.
+   * @param durationMillis the duration of the event in milliseconds
+   */
+  void updateMillis(long durationMillis);
+
+  /**
+   * Update the timer with the given duration in microseconds.
+   * @param durationMicros the duration of the event in microseconds
+   */
+  void updateMicros(long durationMicros);
+
+  /**
+   * Update the timer with the given duration in nanoseconds.
+   * @param durationNanos the duration of the event in nanoseconds
+   */
+  void updateNanos(long durationNanos);
+
+  @InterfaceAudience.Private
+  Histogram getHistogram();
+
+  @InterfaceAudience.Private
+  Meter getMeter();
+}
\ No newline at end of file
diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/package-info.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/package-info.java
new file mode 100644
index 00000000000..e79451fe6ad
--- /dev/null
+++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/package-info.java
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Metrics API for HBase.
+ */
+@PackageMarker
+package org.apache.hadoop.hbase.metrics;
+
+// End package-info.java
diff --git a/hbase-metrics-api/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricRegistriesLoader.java b/hbase-metrics-api/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricRegistriesLoader.java
new file mode 100644
index 00000000000..8746146ded1
--- /dev/null
+++ b/hbase-metrics-api/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricRegistriesLoader.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.metrics;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.mockito.Mockito.mock;
+
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Test class for {@link MetricRegistriesLoader}.
+ */
+@Category(SmallTests.class)
+public class TestMetricRegistriesLoader {
+
+  @Test
+  public void testLoadSingleInstance() {
+    MetricRegistries loader = mock(MetricRegistries.class);
+    MetricRegistries instance = MetricRegistriesLoader.load(Lists.newArrayList(loader));
+    assertEquals(loader, instance);
+  }
+
+  @Test
+  public void testLoadMultipleInstances() {
+    MetricRegistries loader1 = mock(MetricRegistries.class);
+    MetricRegistries loader2 = mock(MetricRegistries.class);
+    MetricRegistries loader3 = mock(MetricRegistries.class);
+    MetricRegistries instance = MetricRegistriesLoader.load(Lists.newArrayList(loader1, loader2,
+        loader3));
+
+    // load() is expected to return the first instance from the provided list
+    assertEquals(loader1, instance);
+    assertNotEquals(loader2, instance);
+    assertNotEquals(loader3, instance);
+  }
+}
\ No newline at end of file
diff --git a/hbase-metrics/README.txt b/hbase-metrics/README.txt
new file mode 100644
index 00000000000..d80064c2d6f
--- /dev/null
+++ b/hbase-metrics/README.txt
@@ -0,0 +1 @@
+See the documentation at hbase-metrics-api/README.
\ No newline at end of file
diff --git a/hbase-metrics/pom.xml b/hbase-metrics/pom.xml
new file mode 100644
index 00000000000..e32db772004
--- /dev/null
+++ b/hbase-metrics/pom.xml
@@ -0,0 +1,136 @@
+
+
See ExampleMasterObserverWithMetrics class in the hbase-examples modules for examples + * of how metrics can be instantiated and used.
+ * @return A MetricRegistry for the coprocessor class to track and export metrics. + */ + MetricRegistry getMetricRegistryForMaster(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetricsCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetricsCoprocessor.java new file mode 100644 index 00000000000..d564002b97c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetricsCoprocessor.java @@ -0,0 +1,136 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +package org.apache.hadoop.hbase.coprocessor; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.metrics.MetricRegistries; +import org.apache.hadoop.hbase.metrics.MetricRegistry; +import org.apache.hadoop.hbase.metrics.MetricRegistryInfo; + +import com.google.common.annotations.VisibleForTesting; + +/** + * Utility class for tracking metrics for various types of coprocessors. Each coprocessor instance + * creates its own MetricRegistry which is exported as an individual MetricSource. MetricRegistries + * are ref counted using the hbase-metric module interfaces. 
+ */ +@InterfaceAudience.Private +public class MetricsCoprocessor { + + // Master coprocessor metrics + private static final String MASTER_COPROC_METRICS_NAME = "Coprocessor.Master"; + private static final String MASTER_COPROC_METRICS_CONTEXT = "master"; + private static final String MASTER_COPROC_METRICS_DESCRIPTION + = "Metrics about HBase MasterObservers"; + private static final String MASTER_COPROC_METRICS_JMX_CONTEXT + = "Master,sub=" + MASTER_COPROC_METRICS_NAME; + + // RegionServer coprocessor metrics + private static final String RS_COPROC_METRICS_NAME = "Coprocessor.RegionServer"; + private static final String RS_COPROC_METRICS_CONTEXT = "regionserver"; + private static final String RS_COPROC_METRICS_DESCRIPTION + = "Metrics about HBase RegionServerObservers"; + private static final String RS_COPROC_METRICS_JMX_CONTEXT = "RegionServer,sub=" + + RS_COPROC_METRICS_NAME; + + // Region coprocessor metrics + private static final String REGION_COPROC_METRICS_NAME = "Coprocessor.Region"; + private static final String REGION_COPROC_METRICS_CONTEXT = "regionserver"; + private static final String REGION_COPROC_METRICS_DESCRIPTION + = "Metrics about HBase RegionObservers"; + private static final String REGION_COPROC_METRICS_JMX_CONTEXT + = "RegionServer,sub=" + REGION_COPROC_METRICS_NAME; + + // WAL coprocessor metrics + private static final String WAL_COPROC_METRICS_NAME = "Coprocessor.WAL"; + private static final String WAL_COPROC_METRICS_CONTEXT = "regionserver"; + private static final String WAL_COPROC_METRICS_DESCRIPTION + = "Metrics about HBase WALObservers"; + private static final String WAL_COPROC_METRICS_JMX_CONTEXT + = "RegionServer,sub=" + WAL_COPROC_METRICS_NAME; + + private static String suffix(String metricName, String cpName) { + return new StringBuilder(metricName) + .append(".") + .append("CP_") + .append(cpName) + .toString(); + } + + @VisibleForTesting + static MetricRegistryInfo createRegistryInfoForMasterCoprocessor(String clazz) { + return new 
MetricRegistryInfo( + suffix(MASTER_COPROC_METRICS_NAME, clazz), + MASTER_COPROC_METRICS_DESCRIPTION, + suffix(MASTER_COPROC_METRICS_JMX_CONTEXT, clazz), + MASTER_COPROC_METRICS_CONTEXT, false); + } + + public static MetricRegistry createRegistryForMasterCoprocessor(String clazz) { + return MetricRegistries.global().create(createRegistryInfoForMasterCoprocessor(clazz)); + } + + @VisibleForTesting + static MetricRegistryInfo createRegistryInfoForRSCoprocessor(String clazz) { + return new MetricRegistryInfo( + suffix(RS_COPROC_METRICS_NAME, clazz), + RS_COPROC_METRICS_DESCRIPTION, + suffix(RS_COPROC_METRICS_JMX_CONTEXT, clazz), + RS_COPROC_METRICS_CONTEXT, false); + } + + public static MetricRegistry createRegistryForRSCoprocessor(String clazz) { + return MetricRegistries.global().create(createRegistryInfoForRSCoprocessor(clazz)); + } + + @VisibleForTesting + public static MetricRegistryInfo createRegistryInfoForRegionCoprocessor(String clazz) { + return new MetricRegistryInfo( + suffix(REGION_COPROC_METRICS_NAME, clazz), + REGION_COPROC_METRICS_DESCRIPTION, + suffix(REGION_COPROC_METRICS_JMX_CONTEXT, clazz), + REGION_COPROC_METRICS_CONTEXT, false); + } + + public static MetricRegistry createRegistryForRegionCoprocessor(String clazz) { + return MetricRegistries.global().create(createRegistryInfoForRegionCoprocessor(clazz)); + } + + @VisibleForTesting + public static MetricRegistryInfo createRegistryInfoForWALCoprocessor(String clazz) { + return new MetricRegistryInfo( + suffix(WAL_COPROC_METRICS_NAME, clazz), + WAL_COPROC_METRICS_DESCRIPTION, + suffix(WAL_COPROC_METRICS_JMX_CONTEXT, clazz), + WAL_COPROC_METRICS_CONTEXT, false); + } + + public static MetricRegistry createRegistryForWALCoprocessor(String clazz) { + return MetricRegistries.global().create(createRegistryInfoForWALCoprocessor(clazz)); + } + + public static void removeRegistry(MetricRegistry registry) { + if (registry == null) { + return; + } + 
MetricRegistries.global().remove(registry.getMetricRegistryInfo()); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java index bdf88af94ce..3566f069b94 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java @@ -21,11 +21,12 @@ package org.apache.hadoop.hbase.coprocessor; import java.util.concurrent.ConcurrentMap; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionServerServices; @@ -43,4 +44,23 @@ public interface RegionCoprocessorEnvironment extends CoprocessorEnvironment { /** @return shared data between all instances of this coprocessor */ ConcurrentMapSee ExampleRegionObserverWithMetrics class in the hbase-examples modules to see examples of how + * metrics can be instantiated and used.
+ * @return A MetricRegistry for the coprocessor class to track and export metrics. + */ + // Note: we are not exposing getMetricRegistryForRegion(). per-region metrics are already costly + // so we do not want to allow coprocessors to export metrics at the region level. We can allow + // getMetricRegistryForTable() to allow coprocessors to track metrics per-table, per-regionserver. + MetricRegistry getMetricRegistryForRegionServer(); + + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java index 527df450ae7..f42556a47a9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.regionserver.RegionServerServices; public interface RegionServerCoprocessorEnvironment extends CoprocessorEnvironment { @@ -28,4 +29,13 @@ public interface RegionServerCoprocessorEnvironment extends CoprocessorEnvironme * @return the region server services */ RegionServerServices getRegionServerServices(); + + /** + * Returns a MetricRegistry that can be used to track metrics at the region server level. + * + *See ExampleMasterObserverWithMetrics class in the hbase-examples modules for examples + * of how metrics can be instantiated and used.
+ * @return A MetricRegistry for the coprocessor class to track and export metrics. + */ + MetricRegistry getMetricRegistryForRegionServer(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java index a4ce5f1a0bf..0865d96296b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.wal.WAL; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @@ -30,4 +31,13 @@ import org.apache.hadoop.hbase.wal.WAL; public interface WALCoprocessorEnvironment extends CoprocessorEnvironment { /** @return reference to the region server's WAL */ WAL getWAL(); + + /** + * Returns a MetricRegistry that can be used to track metrics at the region server level. + * + *See ExampleRegionServerObserverWithMetrics class in the hbase-examples modules for examples + * of how metrics can be instantiated and used.
+ * @return A MetricRegistry for the coprocessor class to track and export metrics. + */ + MetricRegistry getMetricRegistryForRegionServer(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java index 4c1ad23e46c..dd3bf256590 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.io.hfile; -import org.apache.hadoop.hbase.util.FastLongHistogram; +import org.apache.hadoop.hbase.metrics.impl.FastLongHistogram; import org.codehaus.jackson.annotate.JsonIgnoreProperties; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java index d868a1af42f..e5bb83b7799 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java @@ -25,7 +25,7 @@ import java.util.concurrent.ConcurrentSkipListSet; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.util.FastLongHistogram; +import org.apache.hadoop.hbase.metrics.impl.FastLongHistogram; import org.codehaus.jackson.JsonGenerationException; import org.codehaus.jackson.annotate.JsonIgnoreProperties; import org.codehaus.jackson.map.JsonMappingException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java index f38ec70eec0..9249271cb68 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java @@ -21,9 +21,9 @@ 
package org.apache.hadoop.hbase.io.hfile; import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.metrics.impl.FastLongHistogram; import org.apache.hadoop.hbase.util.Counter; -import org.apache.hadoop.hbase.util.FastLongHistogram; /** * Class that implements cache metrics. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java index 9fb8d81d234..c7dd2823237 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java @@ -36,8 +36,14 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.coprocessor.*; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.coprocessor.CoprocessorService; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.MasterObserver; +import org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription; @@ -59,18 +65,33 @@ public class MasterCoprocessorHost */ static class MasterEnvironment extends CoprocessorHost.Environment implements MasterCoprocessorEnvironment { - private MasterServices masterServices; + private final MasterServices 
masterServices; + private final MetricRegistry metricRegistry; public MasterEnvironment(final Class> implClass, final Coprocessor impl, final int priority, final int seq, final Configuration conf, final MasterServices services) { super(impl, priority, seq, conf); this.masterServices = services; + this.metricRegistry = + MetricsCoprocessor.createRegistryForMasterCoprocessor(implClass.getName()); } + @Override public MasterServices getMasterServices() { return masterServices; } + + @Override + public MetricRegistry getMetricRegistryForMaster() { + return metricRegistry; + } + + @Override + protected void shutdown() { + super.shutdown(); + MetricsCoprocessor.removeRegistry(this.metricRegistry); + } } private MasterServices masterServices; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java index 8bca6c5fc09..4c28763e055 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java @@ -17,10 +17,14 @@ */ package org.apache.hadoop.hbase.regionserver; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.metrics.MetricRegistries; +import org.apache.hadoop.hbase.metrics.MetricRegistry; +import org.apache.hadoop.hbase.metrics.Timer; + +import com.google.common.annotations.VisibleForTesting; /** *
@@ -36,11 +40,20 @@ public class MetricsRegionServer {
private MetricsRegionServerSource serverSource;
private MetricsRegionServerWrapper regionServerWrapper;
+ private MetricRegistry metricRegistry;
+ private Timer bulkLoadTimer;
+
public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper) {
this(regionServerWrapper,
CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)
.createServer(regionServerWrapper));
+ // Create hbase-metrics module based metrics. The registry should already be registered by the
+ // MetricsRegionServerSource
+ metricRegistry = MetricRegistries.global().get(serverSource.getMetricRegistryInfo()).get();
+
+ // create and use metrics from the new hbase-metrics based registry.
+ bulkLoadTimer = metricRegistry.timer("Bulkload");
}
MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper,
@@ -131,4 +144,8 @@ public class MetricsRegionServer {
serverSource.updateCompactionInputSize(isMajor, inputBytes);
serverSource.updateCompactionOutputSize(isMajor, outputBytes);
}
+
+ public void updateBulkLoad(long millis) {
+ this.bulkLoadTimer.updateMillis(millis);
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 9b270c6a762..3f23d2be958 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -197,6 +197,8 @@ import org.apache.hadoop.hbase.wal.WALSplitter;
import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
import org.apache.zookeeper.KeeperException;
+import com.google.common.annotations.VisibleForTesting;
+
/**
* Implements the regionserver RPC services.
*/
@@ -2061,6 +2063,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
@Override
public BulkLoadHFileResponse bulkLoadHFile(final RpcController controller,
final BulkLoadHFileRequest request) throws ServiceException {
+ long start = EnvironmentEdgeManager.currentTime();
try {
checkOpen();
requestCount.increment();
@@ -2089,6 +2092,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
return builder.build();
} catch (IOException ie) {
throw new ServiceException(ie);
+ } finally {
+ if (regionServer.metricsRegionServer != null) {
+ regionServer.metricsRegionServer.updateBulkLoad(
+ EnvironmentEdgeManager.currentTime() - start);
+ }
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index 74ecbc8c9fc..3ecd9706d74 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
import org.apache.hadoop.hbase.coprocessor.EndpointObserver;
+import org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
@@ -70,6 +71,8 @@ import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.regionserver.Region.Operation;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker;
@@ -85,6 +88,8 @@ import com.google.common.collect.Lists;
import com.google.protobuf.Message;
import com.google.protobuf.Service;
+import com.google.common.collect.ImmutableList;
+
/**
* Implements the coprocessor environment and runtime support for coprocessors
* loaded within a {@link Region}.
@@ -103,7 +111,7 @@ public class RegionCoprocessorHost
private final boolean hasCustomPostScannerFilterRow;
/**
- *
+ *
* Encapsulation of the environment of each coprocessor
*/
static class RegionEnvironment extends CoprocessorHost.Environment
@@ -114,6 +122,7 @@ public class RegionCoprocessorHost
ConcurrentMap