HBASE-16711 Fix hadoop-3.0 profile compile

Eliminates use of removed or deprecated hadoop2 APIs:
- MBeanUtil -> MBeans: Hadoop 2 has both; Hadoop 3 removes MBeanUtil and keeps only MBeans
- FSDataOutputStream(OutputStream) -> FSDataOutputStream(OutputStream, FileSystem.Statistics)
- MetricsServlet is removed in Hadoop 3; see HADOOP-12504
Jonathan M Hsieh 2016-09-25 05:18:04 -07:00
parent 47e12fb3a0
commit 09a31bd1e9
4 changed files with 16 additions and 7 deletions
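To illustrate the MBeanUtil -> MBeans bullet above: the metrics2 utility org.apache.hadoop.metrics2.util.MBeans exists in both Hadoop 2 and Hadoop 3, its register returns a javax.management.ObjectName, and unregister takes that ObjectName back, mirroring the MBeanUtil pair being replaced. A minimal, self-contained sketch (not part of this commit; the MBeansLifecycleSketch/Demo names are made up):

import javax.management.ObjectName;
import org.apache.hadoop.metrics2.util.MBeans;

public class MBeansLifecycleSketch {
  // Standard JMX MBean pair: the interface name must be the class name plus "MBean".
  public interface DemoMBean { long getCount(); }
  public static class Demo implements DemoMBean {
    @Override public long getCount() { return 42L; }
  }

  public static void main(String[] args) {
    // Registers under Hadoop:service=demo,name=Demo; Hadoop's helper returns null on failure.
    ObjectName name = MBeans.register("demo", "Demo", new Demo());
    try {
      // ... run the service while the bean is visible over JMX ...
    } finally {
      if (name != null) {
        MBeans.unregister(name);  // same shutdown pattern as the HRegionServer hunk below
      }
    }
  }
}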


@@ -59,7 +59,6 @@ import org.apache.hadoop.hbase.http.jmx.JMXJsonServlet;
import org.apache.hadoop.hbase.http.log.LogLevel;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.ReflectionUtils;
-import org.apache.hadoop.metrics.MetricsServlet;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
@@ -710,7 +709,17 @@ public class HttpServer implements FilterContainer {
// set up default servlets
addServlet("stacks", "/stacks", StackServlet.class);
addServlet("logLevel", "/logLevel", LogLevel.Servlet.class);
addServlet("metrics", "/metrics", MetricsServlet.class);
// Hadoop3 has moved completely to metrics2, and dropped support for Metrics v1's
// MetricsServlet (see HADOOP-12504). We'll using reflection to load if against hadoop2.
// Remove when we drop support for hbase on hadoop2.x.
try {
Class clz = Class.forName("org.apache.hadoop.metrics.MetricsServlet");
addServlet("metrics", "/metrics", clz);
} catch (Exception e) {
// do nothing
}
addServlet("jmx", "/jmx", JMXJsonServlet.class);
addServlet("conf", "/conf", ConfServlet.class);
}
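The reflective probe above keeps the hadoop2-only MetricsServlet optional at runtime without a compile-time dependency. As a hypothetical generalization (not HBase code; the OptionalServlets name and registerIfPresent signature are invented for illustration), the same classpath-probe pattern could be factored out like this:

import java.util.Optional;
import java.util.function.Consumer;
import javax.servlet.http.HttpServlet;

public final class OptionalServlets {
  private OptionalServlets() { }

  // Resolve a servlet class by name; empty if the class is missing from the classpath
  // (e.g. org.apache.hadoop.metrics.MetricsServlet on Hadoop 3) or is not an HttpServlet.
  public static Optional<Class<? extends HttpServlet>> lookup(String className) {
    try {
      return Optional.of(Class.forName(className).asSubclass(HttpServlet.class));
    } catch (ClassNotFoundException | ClassCastException e) {
      return Optional.empty();
    }
  }

  // Hand the resolved class to a registration callback only when it is present.
  public static void registerIfPresent(String className,
      Consumer<Class<? extends HttpServlet>> register) {
    lookup(className).ifPresent(register);
  }
}

A caller such as HttpServer could then invoke OptionalServlets.registerIfPresent("org.apache.hadoop.metrics.MetricsServlet", clz -> addServlet("metrics", "/metrics", clz)) and drop both the MetricsServlet import and the inline try/catch.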


@@ -190,7 +190,7 @@ import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.metrics.util.MBeanUtil;
+import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.zookeeper.KeeperException;
@@ -1031,7 +1031,7 @@ public class HRegionServer extends HasThread implements
}
// Run shutdown.
if (mxBean != null) {
-MBeanUtil.unregisterMBean(mxBean);
+MBeans.unregister(mxBean);
mxBean = null;
}
if (this.leases != null) this.leases.closeAfterLeasesExpire();


@@ -273,7 +273,7 @@ public class TestBulkLoad {
HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(conf);
// TODO We need a way to do this without creating files
File hFileLocation = testFolder.newFile();
-FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation));
+FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation), null);
try {
hFileFactory.withOutputStream(out);
hFileFactory.withFileContext(new HFileContext());
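On the TestBulkLoad change above: Hadoop 3 removes the one-argument FSDataOutputStream(OutputStream) constructor, while the two-argument FSDataOutputStream(OutputStream, FileSystem.Statistics) form exists in both Hadoop 2 and 3; passing null statistics is acceptable when no byte accounting is needed, as in this test. A minimal sketch (not part of the commit; class and file names are made up):

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;

public class LocalFsdosSketch {
  public static void main(String[] args) throws IOException {
    File f = File.createTempFile("fsdos-sketch", ".bin");
    // Second argument is the FileSystem.Statistics; null disables byte accounting.
    try (FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(f), null)) {
      out.writeBytes("hello");
      System.out.println("position = " + out.getPos());  // bytes written through the wrapper
    } finally {
      f.delete();
    }
  }
}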


@@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler;
import org.apache.hadoop.hbase.thrift.generated.TIncrement;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.metrics.util.MBeanUtil;
+import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.thrift.TException;
/**
@@ -171,7 +171,7 @@ public class IncrementCoalescer implements IncrementCoalescerMBean {
new ThreadPoolExecutor(CORE_POOL_SIZE, CORE_POOL_SIZE, 50, TimeUnit.MILLISECONDS, queue,
Threads.newDaemonThreadFactory("IncrementCoalescer"));
MBeanUtil.registerMBean("thrift", "Thrift", this);
MBeans.register("thrift", "Thrift", this);
}
public boolean queueIncrement(TIncrement inc) throws TException {