From 7a3bb8aefe48bba27d2ccbbebe448e570ad22412 Mon Sep 17 00:00:00 2001
From: Duo Zhang
Date: Wed, 16 Sep 2020 21:11:47 +0800
Subject: [PATCH] HBASE-25037 Lots of thread pools are changed to non-daemon
 after HBASE-24750, which causes trouble when shutting down (#2407)

Signed-off-by: Viraj Jasani
---
 .../hbase/client/AsyncConnectionImpl.java     |  6 ++--
 .../hbase/client/ClusterStatusListener.java   |  2 +-
 .../hadoop/hbase/ipc/AbstractRpcClient.java   | 12 ++++----
 .../hadoop/hbase/ipc/NettyRpcConnection.java  |  2 +-
 .../org/apache/hadoop/hbase/util/Threads.java |  2 --
 .../client/example/AsyncClientExample.java    |  2 +-
 .../policies/TwoConcurrentActionPolicy.java   | 14 +++++----
 .../procedure2/RemoteProcedureDispatcher.java |  2 +-
 .../hadoop/hbase/ipc/FifoRpcScheduler.java    | 16 +++++-----
 .../hbase/ipc/MasterFifoRpcScheduler.java     | 19 +++++++-----
 .../hbase/master/ClusterStatusPublisher.java  |  2 +-
 .../assignment/SplitTableRegionProcedure.java |  2 +-
 .../hbase/master/cleaner/DirScanPool.java     | 13 ++++----
 .../hbase/namequeues/NamedQueueRecorder.java  |  8 ++---
 .../hbase/procedure/ProcedureCoordinator.java | 15 +++++-----
 .../hbase/procedure/ProcedureMember.java      | 11 +++----
 ...egionServerFlushTableProcedureManager.java |  8 +++--
 .../hbase/regionserver/MemStoreFlusher.java   |  2 +-
 .../snapshot/RegionServerSnapshotManager.java | 18 ++++++-----
 .../hadoop/hbase/regionserver/wal/FSHLog.java |  5 ++--
 .../security/access/ZKPermissionWatcher.java  | 30 ++++++++++---------
 .../hbase/snapshot/SnapshotManifest.java      |  2 +-
 .../hbase/tool/HFileContentValidator.java     |  2 +-
 .../org/apache/hadoop/hbase/util/FSUtils.java |  2 +-
 .../apache/hadoop/hbase/util/HBaseFsck.java   |  2 +-
 .../hadoop/hbase/util/ModifyRegionUtils.java  | 13 ++++----
 .../apache/hadoop/hbase/wal/OutputSink.java   |  2 +-
 .../hadoop/hbase/AcidGuaranteesTestTool.java  |  2 +-
 .../TestAsyncTableGetMultiThreaded.java       |  2 +-
 .../TestOpenTableInCoprocessor.java           |  6 ++--
 .../master/assignment/TestRegionStates.java   |  2 +-
 .../procedure/SimpleRSProcedureManager.java   | 18 +++++------
 .../TestRegionServerReportForDuty.java        |  2 +-
 .../regionserver/wal/TestAsyncFSWAL.java      |  6 ++--
 .../regionserver/wal/TestAsyncWALReplay.java  |  2 +-
 .../hadoop/hbase/util/TestHBaseFsckMOB.java   |  7 ++---
 .../hbase/thrift/IncrementCoalescer.java      |  2 +-
 37 files changed, 133 insertions(+), 130 deletions(-)
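The commit message is terse, so a self-contained illustration of the problem it fixes may help: the JVM exits only once every non-daemon thread has terminated, so a pool built from a default (non-daemon) ThreadFactory keeps the process alive after main() returns. This sketch is not part of the patch; the class and thread names are invented for the demo.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class DaemonShutdownDemo {
      public static void main(String[] args) {
        // Default factory: worker threads are non-daemon, so the process hangs
        // after main() returns unless the pool is shut down explicitly.
        ExecutorService nonDaemon = Executors.newFixedThreadPool(1);
        nonDaemon.submit(DaemonShutdownDemo::nap);

        // Daemon factory: the JVM may exit even while this pool is still alive.
        ExecutorService daemon = Executors.newFixedThreadPool(1, r -> {
          Thread t = new Thread(r, "demo-daemon");
          t.setDaemon(true);
          return t;
        });
        daemon.submit(DaemonShutdownDemo::nap);

        nonDaemon.shutdown(); // comment this out to observe the hang
      }

      private static void nap() {
        try {
          TimeUnit.MILLISECONDS.sleep(100);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      }
    }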
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 83193600f01..c067c1c4ad4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -74,9 +74,9 @@ class AsyncConnectionImpl implements AsyncConnection {
 
   @VisibleForTesting
   static final HashedWheelTimer RETRY_TIMER = new HashedWheelTimer(
-    new ThreadFactoryBuilder().setNameFormat("Async-Client-Retry-Timer-pool-%d")
-      .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), 10,
-    TimeUnit.MILLISECONDS);
+    new ThreadFactoryBuilder().setNameFormat("Async-Client-Retry-Timer-pool-%d").setDaemon(true)
+      .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(),
+    10, TimeUnit.MILLISECONDS);
 
   private static final String RESOLVE_HOSTNAME_ON_FAIL_KEY = "hbase.resolve.hostnames.on.failure";
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
index a7e715c3683..1370d07c5fb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
@@ -181,7 +181,7 @@ class ClusterStatusListener implements Closeable {
     private DatagramChannel channel;
     private final EventLoopGroup group = new NioEventLoopGroup(1,
       new ThreadFactoryBuilder().setNameFormat("hbase-client-clusterStatusListener-pool-%d")
-        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
+        .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
 
     public MulticastListener() {
     }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
index 064664e64d8..bf5130475d2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
@@ -92,13 +92,13 @@ public abstract class AbstractRpcClient<T extends RpcConnection> implements RpcC
   public static final Logger LOG = LoggerFactory.getLogger(AbstractRpcClient.class);
 
   protected static final HashedWheelTimer WHEEL_TIMER = new HashedWheelTimer(
-    new ThreadFactoryBuilder().setNameFormat("RpcClient-timer-pool-%d")
-      .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), 10,
-    TimeUnit.MILLISECONDS);
+    new ThreadFactoryBuilder().setNameFormat("RpcClient-timer-pool-%d").setDaemon(true)
+      .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(),
+    10, TimeUnit.MILLISECONDS);
 
-  private static final ScheduledExecutorService IDLE_CONN_SWEEPER = Executors
-    .newScheduledThreadPool(1,
-      new ThreadFactoryBuilder().setNameFormat("Idle-Rpc-Conn-Sweeper-pool-%d")
+  private static final ScheduledExecutorService IDLE_CONN_SWEEPER =
+    Executors.newScheduledThreadPool(1,
+      new ThreadFactoryBuilder().setNameFormat("Idle-Rpc-Conn-Sweeper-pool-%d").setDaemon(true)
         .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
 
   protected boolean running = true; // if client runs
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
index 3fd6ac9b5e1..fc9f9793021 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
@@ -79,7 +79,7 @@ class NettyRpcConnection extends RpcConnection {
 
   private static final ScheduledExecutorService RELOGIN_EXECUTOR = Executors
     .newSingleThreadScheduledExecutor(new ThreadFactoryBuilder().setNameFormat("Relogin-pool-%d")
-      .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
+      .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
 
   private final NettyRpcClient rpcClient;
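Every hunk in this patch applies the same recipe, so it is worth seeing once in isolation. A sketch (the class name is invented; ThreadFactoryBuilder is the Guava builder relocated into hbase-thirdparty, and Threads.LOGGING_EXCEPTION_HANDLER is the HBase handler the patch keeps wiring in):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;

    import org.apache.hadoop.hbase.util.Threads;

    import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;

    final class DaemonFactorySketch {
      // The pre-HBASE-24750 DaemonThreadFactory marked threads daemon implicitly;
      // with ThreadFactoryBuilder one extra setDaemon(true) call restores that.
      static final ScheduledExecutorService SWEEPER = Executors.newScheduledThreadPool(1,
        new ThreadFactoryBuilder()
          .setNameFormat("Idle-Rpc-Conn-Sweeper-pool-%d") // %d: per-factory thread counter
          .setDaemon(true)                                // do not block JVM shutdown
          .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER)
          .build());
    }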
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java
index ab6f805fc82..8b185959982 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java
@@ -29,8 +29,6 @@ import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/AsyncClientExample.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/AsyncClientExample.java
index 8e34af9b6ad..b773ee89ff5 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/AsyncClientExample.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/AsyncClientExample.java
@@ -131,7 +131,7 @@ public class AsyncClientExample extends Configured implements Tool {
     TableName tableName = TableName.valueOf(args[0]);
     int numOps = args.length > 1 ? Integer.parseInt(args[1]) : DEFAULT_NUM_OPS;
     ExecutorService threadPool = Executors.newFixedThreadPool(THREAD_POOL_SIZE,
-      new ThreadFactoryBuilder().setNameFormat("AsyncClientExample-pool-%d")
+      new ThreadFactoryBuilder().setNameFormat("AsyncClientExample-pool-%d").setDaemon(true)
         .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
     // We use AsyncTable here so we need to provide a separated thread pool. RawAsyncTable does not
     // need a thread pool and may have a better performance if you use it correctly as it can save
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/TwoConcurrentActionPolicy.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/TwoConcurrentActionPolicy.java
index 5c45d9c7471..271bb15c846 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/TwoConcurrentActionPolicy.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/TwoConcurrentActionPolicy.java
@@ -18,15 +18,16 @@
 
 package org.apache.hadoop.hbase.chaos.policies;
 
-import org.apache.hadoop.hbase.DaemonThreadFactory;
-import org.apache.hadoop.hbase.chaos.actions.Action;
-import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
-import org.apache.hadoop.util.StringUtils;
-
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import org.apache.hadoop.hbase.chaos.actions.Action;
+import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.util.StringUtils;
+
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
  * Chaos Monkey policy that will run two different actions at the same time.
@@ -42,7 +43,8 @@ public class TwoConcurrentActionPolicy extends PeriodicPolicy {
     this.actionsOne = actionsOne;
     this.actionsTwo = actionsTwo;
     executor = Executors.newFixedThreadPool(2,
-      new DaemonThreadFactory("TwoConcurrentAction-"));
+      new ThreadFactoryBuilder().setNameFormat("TwoConcurrentAction-pool-%d").setDaemon(true)
+        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
   }
 
   @Override
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
index dd76083da93..71d55ea9afb 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
@@ -102,7 +102,7 @@ public abstract class RemoteProcedureDispatcher
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
@@ ... @@
-    this.executor = new ThreadPoolExecutor(handlerCount, handlerCount, 60, TimeUnit.SECONDS,
-      new ArrayBlockingQueue<Runnable>(maxQueueLength),
-      new DaemonThreadFactory("FifoRpcScheduler.handler"),
-      new ThreadPoolExecutor.CallerRunsPolicy());
+    this.executor = new ThreadPoolExecutor(handlerCount, handlerCount, 60, TimeUnit.SECONDS,
+      new ArrayBlockingQueue<>(maxQueueLength),
+      new ThreadFactoryBuilder().setNameFormat("FifoRpcScheduler.handler-pool-%d").setDaemon(true)
+        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(),
+      new ThreadPoolExecutor.CallerRunsPolicy());
   }
 
   @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterFifoRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterFifoRpcScheduler.java
index b596c40e750..6de676d74cf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterFifoRpcScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterFifoRpcScheduler.java
@@ -23,14 +23,15 @@ import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
-
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.DaemonThreadFactory;
+import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+
 /**
  * A special {@code }RpcScheduler} only used for master. This scheduler separates RegionServerReport
  * requests to independent handlers to avoid these requests block other requests. To use this
@@ -71,13 +72,15 @@ public class MasterFifoRpcScheduler extends FifoRpcScheduler {
       this.getClass().getSimpleName(), handlerCount, maxQueueLength, rsReportHandlerCount,
       rsRsreportMaxQueueLength);
     this.executor = new ThreadPoolExecutor(handlerCount, handlerCount, 60, TimeUnit.SECONDS,
-      new ArrayBlockingQueue<Runnable>(maxQueueLength),
-      new DaemonThreadFactory("MasterFifoRpcScheduler.call.handler"),
-      new ThreadPoolExecutor.CallerRunsPolicy());
+      new ArrayBlockingQueue<>(maxQueueLength),
+      new ThreadFactoryBuilder().setNameFormat("MasterFifoRpcScheduler.call.handler-pool-%d")
+        .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(),
+      new ThreadPoolExecutor.CallerRunsPolicy());
     this.rsReportExecutor = new ThreadPoolExecutor(rsReportHandlerCount, rsReportHandlerCount, 60,
-      TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(rsRsreportMaxQueueLength),
-      new DaemonThreadFactory("MasterFifoRpcScheduler.RSReport.handler"),
-      new ThreadPoolExecutor.CallerRunsPolicy());
+      TimeUnit.SECONDS, new ArrayBlockingQueue<>(rsRsreportMaxQueueLength),
+      new ThreadFactoryBuilder().setNameFormat("MasterFifoRpcScheduler.RSReport.handler-pool-%d")
+        .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(),
+      new ThreadPoolExecutor.CallerRunsPolicy());
   }
 
   @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
index 608f118b559..dd67c05eae0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
@@ -248,7 +248,7 @@ public class ClusterStatusPublisher extends ScheduledChore {
     private DatagramChannel channel;
     private final EventLoopGroup group = new NioEventLoopGroup(1,
       new ThreadFactoryBuilder().setNameFormat("hbase-master-clusterStatusPublisher-pool-%d")
-        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
+        .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
 
     public MulticastPublisher() {
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 2d91c552975..d0413360e6d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -680,7 +680,7 @@ public class SplitTableRegionProcedure
     LOG.info("pid=" + getProcId() + " splitting " + nbFiles + " storefiles, region=" +
       getParentRegion().getShortNameToLog() + ", threads=" + maxThreads);
     final ExecutorService threadPool = Executors.newFixedThreadPool(maxThreads,
-      new ThreadFactoryBuilder().setNameFormat("StoreFileSplitter-pool-%d")
+      new ThreadFactoryBuilder().setNameFormat("StoreFileSplitter-pool-%d").setDaemon(true)
         .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
     final List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/DirScanPool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/DirScanPool.java
index ca934749b28..87f15c70466 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/DirScanPool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/DirScanPool.java
@@ -17,17 +17,17 @@
  */
 package org.apache.hadoop.hbase.master.cleaner;
 
-import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
-
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.DaemonThreadFactory;
 import org.apache.hadoop.hbase.conf.ConfigurationObserver;
+import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+
 /**
  * The thread pool used for scan directories
  */
@@ -51,10 +51,9 @@ public class DirScanPool implements ConfigurationObserver {
   }
 
   private static ThreadPoolExecutor initializePool(int size) {
-    ThreadPoolExecutor executor = new ThreadPoolExecutor(size, size, 1, TimeUnit.MINUTES,
-      new LinkedBlockingQueue<>(), new DaemonThreadFactory("dir-scan-pool"));
-    executor.allowCoreThreadTimeOut(true);
-    return executor;
+    return Threads.getBoundedCachedThreadPool(size, 1, TimeUnit.MINUTES,
+      new ThreadFactoryBuilder().setNameFormat("dir-scan-pool-%d").setDaemon(true)
+        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/NamedQueueRecorder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/NamedQueueRecorder.java
index b5e743eb2eb..4b89e84d446 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/NamedQueueRecorder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/NamedQueueRecorder.java
@@ -63,12 +63,10 @@ public class NamedQueueRecorder {
     int eventCount = conf.getInt("hbase.namedqueue.ringbuffer.size", 1024);
 
     // disruptor initialization with BlockingWaitStrategy
-    this.disruptor = new Disruptor<>(RingBufferEnvelope::new,
-      getEventCount(eventCount),
+    this.disruptor = new Disruptor<>(RingBufferEnvelope::new, getEventCount(eventCount),
       new ThreadFactoryBuilder().setNameFormat(hostingThreadName + ".slowlog.append-pool-%d")
-        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(),
-      ProducerType.MULTI,
-      new BlockingWaitStrategy());
+        .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(),
+      ProducerType.MULTI, new BlockingWaitStrategy());
     this.disruptor.setDefaultExceptionHandler(new DisruptorExceptionHandler());
 
     // initialize ringbuffer event handler
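DirScanPool above stops hand-rolling a ThreadPoolExecutor and delegates to Threads.getBoundedCachedThreadPool. Roughly, that helper packages the bounded-pool-that-shrinks-when-idle idiom the removed lines spelled out; this is an approximation for readers outside the HBase tree, not the exact implementation:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    final class BoundedCachedPoolSketch {
      static ThreadPoolExecutor getBoundedCachedThreadPool(int maxThreads, long keepAlive,
          TimeUnit unit, ThreadFactory factory) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(maxThreads, maxThreads, keepAlive, unit,
          new LinkedBlockingQueue<>(), factory);
        pool.allowCoreThreadTimeOut(true); // idle threads exit after keepAlive, as before
        return pool;
      }
    }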
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java
index d5800b12969..48e96cd3bdd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java
@@ -28,16 +28,16 @@ import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
-
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.errorhandling.ForeignException;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.hbase.DaemonThreadFactory;
-import org.apache.hadoop.hbase.errorhandling.ForeignException;
-import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.collect.MapMaker;
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
  * This is the master side of a distributed complex procedure execution.
@@ -112,8 +112,9 @@ public class ProcedureCoordinator {
   public static ThreadPoolExecutor defaultPool(String coordName, int opThreads,
       long keepAliveMillis) {
     return new ThreadPoolExecutor(1, opThreads, keepAliveMillis, TimeUnit.MILLISECONDS,
-      new SynchronousQueue<>(),
-      new DaemonThreadFactory("(" + coordName + ")-proc-coordinator-pool"));
+      new SynchronousQueue<>(),
+      new ThreadFactoryBuilder().setNameFormat("(" + coordName + ")-proc-coordinator-pool-%d")
+        .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java
index 5a55028884f..d41d9b037f9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java
@@ -25,14 +25,14 @@ import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
-
+import org.apache.hadoop.hbase.errorhandling.ForeignException;
+import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.hbase.DaemonThreadFactory;
-import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hbase.thirdparty.com.google.common.collect.MapMaker;
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
  * Process to kick off and manage a running {@link Subprocedure} on a member. This is the
@@ -86,8 +86,9 @@ public class ProcedureMember implements Closeable {
   public static ThreadPoolExecutor defaultPool(String memberName, int procThreads,
       long keepAliveMillis) {
     return new ThreadPoolExecutor(1, procThreads, keepAliveMillis, TimeUnit.MILLISECONDS,
-      new SynchronousQueue<>(),
-      new DaemonThreadFactory("member: '" + memberName + "' subprocedure-pool"));
+      new SynchronousQueue<>(),
+      new ThreadFactoryBuilder().setNameFormat("member: '" + memberName + "' subprocedure-pool-%d")
+        .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
index cb5d54fc5da..f11f00b26bc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
@@ -27,10 +27,8 @@ import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.Future;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.DaemonThreadFactory;
 import org.apache.hadoop.hbase.DroppedSnapshotException;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.TableName;
@@ -51,6 +49,9 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 
 /**
@@ -227,7 +228,8 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur
       int threads = conf.getInt(CONCURENT_FLUSH_TASKS_KEY, DEFAULT_CONCURRENT_FLUSH_TASKS);
       this.name = name;
       executor = Threads.getBoundedCachedThreadPool(threads, keepAlive, TimeUnit.MILLISECONDS,
-        new DaemonThreadFactory("rs(" + name + ")-flush-proc-pool-"));
+        new ThreadFactoryBuilder().setNameFormat("rs(" + name + ")-flush-proc-pool-%d")
+          .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
       taskPool = new ExecutorCompletionService<>(executor);
     }
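The flush manager above (and the snapshot manager below) pair their daemon pool with an ExecutorCompletionService. For context, the usage pattern looks roughly like this — a simplified sketch, not code from either manager:

    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.ExecutorService;

    final class CompletionServiceSketch {
      // Submit all tasks, then reap results in completion order; get() rethrows
      // the first task failure as an ExecutionException.
      static void runAll(ExecutorService pool, Iterable<Runnable> tasks)
          throws InterruptedException, ExecutionException {
        ExecutorCompletionService<Void> completion = new ExecutorCompletionService<>(pool);
        int submitted = 0;
        for (Runnable task : tasks) {
          completion.submit(task, null); // Runnable plus a fixed null result
          submitted++;
        }
        for (int i = 0; i < submitted; i++) {
          completion.take().get();
        }
      }
    }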
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index 744bbbef797..1f6a3507e94 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -515,7 +515,7 @@ class MemStoreFlusher implements FlushRequester {
   synchronized void start(UncaughtExceptionHandler eh) {
     ThreadFactory flusherThreadFactory = new ThreadFactoryBuilder()
       .setNameFormat(server.getServerName().toShortString() + "-MemStoreFlusher-pool-%d")
-      .setUncaughtExceptionHandler(eh).build();
+      .setDaemon(true).setUncaughtExceptionHandler(eh).build();
     for (int i = 0; i < flushHandlers.length; i++) {
       flushHandlers[i] = new FlushHandler("MemStoreFlusher." + i);
       flusherThreadFactory.newThread(flushHandlers[i]);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
index 579bb24bc35..4f3e5d72bd7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
@@ -28,17 +28,11 @@ import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.Future;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.DaemonThreadFactory;
 import org.apache.hadoop.hbase.DroppedSnapshotException;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
@@ -53,12 +47,19 @@ import org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
+
 /**
  * This manager class handles the work dealing with snapshots for a {@link HRegionServer}.
  * <p>
@@ -284,7 +285,8 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager {
       int threads = conf.getInt(CONCURENT_SNAPSHOT_TASKS_KEY, DEFAULT_CONCURRENT_SNAPSHOT_TASKS);
       this.name = name;
       executor = Threads.getBoundedCachedThreadPool(threads, keepAlive, TimeUnit.MILLISECONDS,
-        new DaemonThreadFactory("rs(" + name + ")-snapshot-pool-"));
+        new ThreadFactoryBuilder().setNameFormat("rs(" + name + ")-snapshot-pool-%d")
+          .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
       taskPool = new ExecutorCompletionService<>(executor);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index faf32edafe7..2227da703cf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -241,10 +241,9 @@ public class FSHLog extends AbstractFSWAL<Writer> {
     String hostingThreadName = Thread.currentThread().getName();
     // Using BlockingWaitStrategy. Stuff that is going on here takes so long it makes no sense
     // spinning as other strategies do.
-    this.disruptor = new Disruptor<>(RingBufferTruck::new,
-      getPreallocatedEventCount(),
+    this.disruptor = new Disruptor<>(RingBufferTruck::new, getPreallocatedEventCount(),
       new ThreadFactoryBuilder().setNameFormat(hostingThreadName + ".append-pool-%d")
-        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(),
+        .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(),
       ProducerType.MULTI, new BlockingWaitStrategy());
     // Advance the ring buffer sequence so that it starts from 1 instead of 0,
     // because SyncFuture.NOT_DONE = 0.
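Both Disruptor call sites (NamedQueueRecorder and FSHLog) gain setDaemon(true) while keeping Threads.LOGGING_EXCEPTION_HANDLER. That handler is approximately the following — a sketch of the real constant in org.apache.hadoop.hbase.util.Threads, which matters because a throwable escaping a pool thread otherwise vanishes silently:

    import java.lang.Thread.UncaughtExceptionHandler;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    final class LoggingHandlerSketch {
      private static final Logger LOG = LoggerFactory.getLogger(LoggingHandlerSketch.class);

      static final UncaughtExceptionHandler LOGGING_EXCEPTION_HANDLER =
        (thread, throwable) -> LOG.warn("Thread:" + thread + " exited with Exception:", throwable);
    }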
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
index db0e53502ed..8ff238cc38f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
@@ -18,19 +18,6 @@
 
 package org.apache.hadoop.hbase.security.access;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.DaemonThreadFactory;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.zookeeper.ZKListener;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.List;
@@ -41,6 +28,20 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.RejectedExecutionException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.zookeeper.ZKListener;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
  * Handles synchronization of access control list entries and updates
@@ -69,7 +70,8 @@ public class ZKPermissionWatcher extends ZKListener implements Closeable {
     String aclZnodeParent = conf.get("zookeeper.znode.acl.parent", ACL_NODE);
     this.aclZNode = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, aclZnodeParent);
     executor = Executors.newSingleThreadExecutor(
-      new DaemonThreadFactory("zk-permission-watcher"));
+      new ThreadFactoryBuilder().setNameFormat("zk-permission-watcher-pool-%d").setDaemon(true)
+        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
   }
 
   public void start() throws KeeperException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index b6eb7a7d1e1..9112b21fac7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -616,7 +616,7 @@ public final class SnapshotManifest {
   public static ThreadPoolExecutor createExecutor(final Configuration conf, final String name) {
     int maxThreads = conf.getInt("hbase.snapshot.thread.pool.max", 8);
     return Threads.getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS,
-      new ThreadFactoryBuilder().setNameFormat(name + "-pool-%d")
+      new ThreadFactoryBuilder().setNameFormat(name + "-pool-%d").setDaemon(true)
        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/HFileContentValidator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/HFileContentValidator.java
index 53f03029cad..2f648975724 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/HFileContentValidator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/HFileContentValidator.java
@@ -109,7 +109,7 @@ public class HFileContentValidator extends AbstractHBaseTool {
     int availableProcessors = Runtime.getRuntime().availableProcessors();
     int numThreads = conf.getInt("hfilevalidator.numthreads", availableProcessors);
     return Executors.newFixedThreadPool(numThreads,
-      new ThreadFactoryBuilder().setNameFormat("hfile-validator-pool-%d")
+      new ThreadFactoryBuilder().setNameFormat("hfile-validator-pool-%d").setDaemon(true)
        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 3b22de46adc..19b122f36b1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -1638,7 +1638,7 @@ public final class FSUtils {
 
     // run in multiple threads
     final ExecutorService tpe = Executors.newFixedThreadPool(threadPoolSize,
-      new ThreadFactoryBuilder().setNameFormat("FSRegionQuery-pool-%d")
+      new ThreadFactoryBuilder().setNameFormat("FSRegionQuery-pool-%d").setDaemon(true)
        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
     try {
       // ignore all file status items that are not of interest
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index a58c681821b..6d763c406cb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -351,7 +351,7 @@ public class HBaseFsck extends Configured implements Closeable {
   private static ExecutorService createThreadPool(Configuration conf) {
     int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);
     return new ScheduledThreadPoolExecutor(numThreads,
-      new ThreadFactoryBuilder().setNameFormat("hbasefsck-pool-%d")
+      new ThreadFactoryBuilder().setNameFormat("hbasefsck-pool-%d").setDaemon(true)
        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
index 99691c8ac3a..b4e586392cf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
@@ -228,13 +228,12 @@ public abstract class ModifyRegionUtils {
    * "hbase.hregion.open.and.init.threads.max" property.
    */
   static ThreadPoolExecutor getRegionOpenAndInitThreadPool(final Configuration conf,
-      final String threadNamePrefix, int regionNumber) {
-    int maxThreads = Math.min(regionNumber, conf.getInt(
-      "hbase.hregion.open.and.init.threads.max", 16));
-    ThreadPoolExecutor regionOpenAndInitThreadPool = Threads.
-      getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS,
-        new ThreadFactoryBuilder().setNameFormat(threadNamePrefix + "-pool-%d")
-          .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
+      final String threadNamePrefix, int regionNumber) {
+    int maxThreads =
+      Math.min(regionNumber, conf.getInt("hbase.hregion.open.and.init.threads.max", 16));
+    ThreadPoolExecutor regionOpenAndInitThreadPool = Threads.getBoundedCachedThreadPool(maxThreads,
+      30L, TimeUnit.SECONDS, new ThreadFactoryBuilder().setNameFormat(threadNamePrefix + "-pool-%d")
+        .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
     return regionOpenAndInitThreadPool;
   }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java
index 826febeaaef..443e41ccd63 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java
@@ -77,7 +77,7 @@ public abstract class OutputSink {
     this.controller = controller;
     this.entryBuffers = entryBuffers;
     this.closeThreadPool = Threads.getBoundedCachedThreadPool(numThreads, 30L, TimeUnit.SECONDS,
-      new ThreadFactoryBuilder().setNameFormat("split-log-closeStream-pool-%d")
+      new ThreadFactoryBuilder().setNameFormat("split-log-closeStream-pool-%d").setDaemon(true)
        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
     this.closeCompletionService = new ExecutorCompletionService<>(closeThreadPool);
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java
index a58648f45bb..4faf86dd4e6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java
@@ -93,7 +93,7 @@ public class AcidGuaranteesTestTool extends AbstractHBaseTool {
     ThreadPoolExecutor tpe =
       new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue,
-        new ThreadFactoryBuilder().setNameFormat(toString() + "-shared-pool-%d")
+        new ThreadFactoryBuilder().setNameFormat(toString() + "-shared-pool-%d").setDaemon(true)
          .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
     tpe.allowCoreThreadTimeOut(true);
     return tpe;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
index 6d89299a7e5..eedfcf2359f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
@@ -139,7 +139,7 @@ public class TestAsyncTableGetMultiThreaded {
     int numThreads = 7;
     AtomicBoolean stop = new AtomicBoolean(false);
     ExecutorService executor = Executors.newFixedThreadPool(numThreads,
-      new ThreadFactoryBuilder().setNameFormat("TestAsyncGet-pool-%d")
+      new ThreadFactoryBuilder().setNameFormat("TestAsyncGet-pool-%d").setDaemon(true)
        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
     List<Future<?>> futures = new ArrayList<>();
     IntStream.range(0, numThreads).forEach(i -> futures.add(executor.submit(() -> {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java
index 8ab0ca57860..8e3fe8017a1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java
@@ -101,9 +101,9 @@ public class TestOpenTableInCoprocessor {
   private ExecutorService getPool() {
     int maxThreads = 1;
     long keepAliveTime = 60;
-    ThreadPoolExecutor pool =
-      new ThreadPoolExecutor(1, maxThreads, keepAliveTime, TimeUnit.SECONDS,
-        new SynchronousQueue<>(), new ThreadFactoryBuilder().setNameFormat("hbase-table-pool-%d")
+    ThreadPoolExecutor pool = new ThreadPoolExecutor(1, maxThreads, keepAliveTime,
+      TimeUnit.SECONDS, new SynchronousQueue<>(),
+      new ThreadFactoryBuilder().setNameFormat("hbase-table-pool-%d").setDaemon(true)
         .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
     pool.allowCoreThreadTimeOut(true);
     return pool;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java
index dbad831dcfe..48cca305700 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java
@@ -63,7 +63,7 @@ public class TestRegionStates {
   @BeforeClass
   public static void setUp() throws Exception {
     threadPool = Threads.getBoundedCachedThreadPool(32, 60L, TimeUnit.SECONDS,
-      new ThreadFactoryBuilder().setNameFormat("ProcedureDispatcher-pool-%d")
+      new ThreadFactoryBuilder().setNameFormat("ProcedureDispatcher-pool-%d").setDaemon(true)
         .setUncaughtExceptionHandler((t, e) -> LOG.warn("Failed thread " + t.getName(), e))
         .build());
     executorService = new ExecutorCompletionService(threadPool);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java
index 270f6d95c9e..66a45aa219c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java
@@ -25,22 +25,22 @@ import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
-import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.DaemonThreadFactory;
-import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+
 public class SimpleRSProcedureManager extends RegionServerProcedureManager {
 
   private static final Logger LOG = LoggerFactory.getLogger(SimpleRSProcedureManager.class);
@@ -125,9 +125,9 @@ public class SimpleRSProcedureManager extends RegionServerProcedureManager {
 
     public SimpleSubprocedurePool(String name, Configuration conf) {
       this.name = name;
-      executor = new ThreadPoolExecutor(1, 1, 500,
-        TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
-        new DaemonThreadFactory("rs(" + name + ")-procedure-pool-"));
+      executor = Executors.newSingleThreadExecutor(
+        new ThreadFactoryBuilder().setNameFormat("rs(" + name + ")-procedure-pool-%d")
+          .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
       taskPool = new ExecutorCompletionService<>(executor);
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
index 57b49992606..da395512f7d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
@@ -234,7 +234,7 @@ public class TestRegionServerReportForDuty {
   @Test
   public void testReportForDutyWithRSRpcRetry() throws Exception {
     ScheduledThreadPoolExecutor scheduledThreadPoolExecutor = new ScheduledThreadPoolExecutor(1,
-      new ThreadFactoryBuilder().setNameFormat("RSDelayedStart-pool-%d")
+      new ThreadFactoryBuilder().setNameFormat("RSDelayedStart-pool-%d").setDaemon(true)
        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
 
     // Start a master and wait for it to become the active/primary master.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java
index bdb22ee6ec0..f413baf30d1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java
@@ -83,9 +83,9 @@ public class TestAsyncFSWAL extends AbstractTestFSWAL {
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    GROUP = new NioEventLoopGroup(1,
-      new ThreadFactoryBuilder().setNameFormat("TestAsyncFSWAL-pool-%d")
-        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
+    GROUP =
+      new NioEventLoopGroup(1, new ThreadFactoryBuilder().setNameFormat("TestAsyncFSWAL-pool-%d")
+        .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
     CHANNEL_CLASS = NioSocketChannel.class;
     AbstractTestFSWAL.setUpBeforeClass();
   }
ThreadFactoryBuilder().setNameFormat("TestAsyncWALReplay-pool-%d").setDaemon(true) .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); CHANNEL_CLASS = NioSocketChannel.class; Configuration conf = AbstractTestWALReplay.TEST_UTIL.getConfiguration(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java index 12a4412f154..b80ed51501c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java @@ -66,10 +66,9 @@ public class TestHBaseFsckMOB extends BaseTestHBaseFsck { conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 8 * REGION_ONLINE_TIMEOUT); TEST_UTIL.startMiniCluster(1); - tableExecutorService = - new ThreadPoolExecutor(1, POOL_SIZE, 60, TimeUnit.SECONDS, new SynchronousQueue<>(), - new ThreadFactoryBuilder().setNameFormat("testhbck-pool-%d") - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + tableExecutorService = new ThreadPoolExecutor(1, POOL_SIZE, 60, TimeUnit.SECONDS, + new SynchronousQueue<>(), new ThreadFactoryBuilder().setNameFormat("testhbck-pool-%d") + .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); hbfsckExecutorService = new ScheduledThreadPoolExecutor(POOL_SIZE); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java index b8707b20cdc..6a204ea0045 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java @@ -145,7 +145,7 @@ public class IncrementCoalescer implements IncrementCoalescerMBean { this.handler = hand; LinkedBlockingQueue queue = new LinkedBlockingQueue<>(); pool = new ThreadPoolExecutor(CORE_POOL_SIZE, CORE_POOL_SIZE, 50, TimeUnit.MILLISECONDS, queue, - new ThreadFactoryBuilder().setNameFormat("IncrementCoalescer-pool-%d") + new ThreadFactoryBuilder().setNameFormat("IncrementCoalescer-pool-%d").setDaemon(true) .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); MBeans.register("thrift", "Thrift", this); }