diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 70b5242c7a3..98c29e7f6d3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -59,7 +59,6 @@ import org.apache.hadoop.hbase.http.jmx.JMXJsonServlet;
 import org.apache.hadoop.hbase.http.log.LogLevel;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
-import org.apache.hadoop.metrics.MetricsServlet;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
@@ -710,7 +709,17 @@ public class HttpServer implements FilterContainer {
     // set up default servlets
     addServlet("stacks", "/stacks", StackServlet.class);
     addServlet("logLevel", "/logLevel", LogLevel.Servlet.class);
-    addServlet("metrics", "/metrics", MetricsServlet.class);
+
+    // Hadoop3 has moved completely to metrics2 and dropped support for Metrics v1's
+    // MetricsServlet (see HADOOP-12504). We use reflection to load it when running
+    // against hadoop2. Remove this once we drop support for hbase on hadoop2.x.
+    try {
+      Class clz = Class.forName("org.apache.hadoop.metrics.MetricsServlet");
+      addServlet("metrics", "/metrics", clz);
+    } catch (Exception e) {
+      // MetricsServlet is not on the classpath (hadoop3), so /metrics is not registered.
+    }
+
     addServlet("jmx", "/jmx", JMXJsonServlet.class);
     addServlet("conf", "/conf", ConfServlet.class);
   }
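
The HttpServer hunk above resolves Metrics v1's servlet by name at runtime
instead of importing it, so the same source compiles against hadoop2 (where
org.apache.hadoop.metrics.MetricsServlet still exists) and hadoop3 (where
HADOOP-12504 removed it). A minimal standalone sketch of that pattern; the
OptionalServlets and Registrar names are illustrative, not HBase API:

  public class OptionalServlets {
    /** Stand-in for HttpServer.addServlet; hypothetical, for illustration only. */
    interface Registrar {
      void register(String name, String pathSpec, Class<?> servletClass);
    }

    static void addMetricsServletIfPresent(Registrar registrar) {
      try {
        // Resolves on hadoop2, where Metrics v1 still ships its servlet.
        Class<?> clz = Class.forName("org.apache.hadoop.metrics.MetricsServlet");
        registrar.register("metrics", "/metrics", clz);
      } catch (ClassNotFoundException e) {
        // hadoop3: the class is absent, so /metrics is simply not wired up.
      }
    }
  }
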
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index a11d367e465..e5519f77bc4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -190,7 +190,7 @@ import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.metrics.util.MBeanUtil;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.zookeeper.KeeperException;
@@ -1031,7 +1031,7 @@ public class HRegionServer extends HasThread implements
     }
     // Run shutdown.
     if (mxBean != null) {
-      MBeanUtil.unregisterMBean(mxBean);
+      MBeans.unregister(mxBean);
       mxBean = null;
     }
     if (this.leases != null) this.leases.closeAfterLeasesExpire();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
index 6ba12a935ce..caf09ad90ca 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
@@ -273,7 +273,7 @@ public class TestBulkLoad {
     HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(conf);
     // TODO We need a way to do this without creating files
     File hFileLocation = testFolder.newFile();
-    FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation));
+    FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation), null);
     try {
       hFileFactory.withOutputStream(out);
       hFileFactory.withFileContext(new HFileContext());
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java
index e937f2db740..2f4336b81f8 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler;
 import org.apache.hadoop.hbase.thrift.generated.TIncrement;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.metrics.util.MBeanUtil;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.thrift.TException;
 
 /**
@@ -171,7 +171,7 @@ public class IncrementCoalescer implements IncrementCoalescerMBean {
       new ThreadPoolExecutor(CORE_POOL_SIZE, CORE_POOL_SIZE, 50, TimeUnit.MILLISECONDS, queue,
         Threads.newDaemonThreadFactory("IncrementCoalescer"));
 
-    MBeanUtil.registerMBean("thrift", "Thrift", this);
+    MBeans.register("thrift", "Thrift", this);
   }
 
   public boolean queueIncrement(TIncrement inc) throws TException {
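
The MBeanUtil-to-MBeans swaps in HRegionServer and IncrementCoalescer are the
same v1-to-metrics2 move: org.apache.hadoop.metrics2.util.MBeans.register()
returns the ObjectName that MBeans.unregister() later takes, which is why
HRegionServer holds on to mxBean between startup and shutdown. A minimal
register/unregister round trip, assuming a hypothetical Demo bean:

  import javax.management.ObjectName;
  import org.apache.hadoop.metrics2.util.MBeans;

  public class MBeansRoundTrip {
    /** JMX standard MBeans need a matching *MBean interface; Demo is illustrative. */
    public interface DemoMBean {
      int getValue();
    }

    public static class Demo implements DemoMBean {
      @Override
      public int getValue() { return 42; }
    }

    public static void main(String[] args) {
      // Registers under roughly Hadoop:service=thrift,name=Demo and returns the handle.
      ObjectName name = MBeans.register("thrift", "Demo", new Demo());
      MBeans.unregister(name); // shutdown path, mirroring HRegionServer's cleanup
    }
  }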
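
The TestBulkLoad change is needed because hadoop3 dropped the one-argument
FSDataOutputStream constructor that hadoop2 had already deprecated; the
surviving two-argument constructor takes a FileSystem.Statistics, and passing
null skips stats tracking. A sketch of the same wrapping, using a hypothetical
helper name:

  import java.io.File;
  import java.io.FileOutputStream;
  import java.io.IOException;
  import org.apache.hadoop.fs.FSDataOutputStream;

  public class LocalStreams {
    /** Hypothetical helper; null Statistics means no byte-count tracking. */
    static FSDataOutputStream wrapLocalFile(File f) throws IOException {
      return new FSDataOutputStream(new FileOutputStream(f), null);
    }
  }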