HBASE-27195 Clean up netty worker/thread pool configuration (#4619)

The configuration settings "hbase.netty.worker.count" and
"hbase.netty.eventloop.rpcserver.thread.count" appear to duplicate each
other.

Also, formalizes another setting found in NettyEventLoopGroupConfig,
"hbase.netty.nativetransport".

Also, native epoll is not limited to amd64. aarch64 supports it too.

Signed-off-by: Duo Zhang <zhangduo@apache.org>

Conflicts:
	hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java
This commit is contained in:
Andrew Purtell 2022-07-18 15:48:40 -07:00
parent f76c0d9021
commit d19805a7bc
4 changed files with 42 additions and 38 deletions

View File

@@ -42,12 +42,15 @@ public class JVM {
private static final boolean ibmvendor =
System.getProperty("java.vendor") != null && System.getProperty("java.vendor").contains("IBM");
private static final boolean windows =
System.getProperty("os.name") != null && System.getProperty("os.name").startsWith("Windows");
private static final boolean linux =
System.getProperty("os.name") != null && System.getProperty("os.name").startsWith("Linux");
// At least on my systems os.name reports as "linux", not "Linux". Prefer case insensitive tests.
private static final boolean windows = System.getProperty("os.name") != null
&& System.getProperty("os.name").toLowerCase().contains("windows");
private static final boolean linux = System.getProperty("os.name") != null
&& System.getProperty("os.name").toLowerCase().contains("linux");
private static final boolean amd64 =
System.getProperty("os.arch") != null && System.getProperty("os.arch").contains("amd64");
private static final boolean aarch64 =
System.getProperty("os.arch") != null && System.getProperty("os.arch").contains("aarch64");
private static final String JVMVersion = System.getProperty("java.version");
@@ -99,6 +102,14 @@ public class JVM {
return amd64;
}
/**
 * Check if the arch is aarch64.
 * @return true if the os.arch system property contains "aarch64", false otherwise.
 */
public static boolean isAarch64() {
return aarch64;
}
/**
* Check if the finish() method of GZIPOutputStream is broken
* @return whether GZIPOutputStream.finish() is broken.

View File

@@ -53,10 +53,7 @@ import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
import org.apache.hbase.thirdparty.io.netty.channel.ServerChannel;
import org.apache.hbase.thirdparty.io.netty.channel.group.ChannelGroup;
import org.apache.hbase.thirdparty.io.netty.channel.group.DefaultChannelGroup;
import org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup;
import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioServerSocketChannel;
import org.apache.hbase.thirdparty.io.netty.handler.codec.FixedLengthFrameDecoder;
import org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultThreadFactory;
import org.apache.hbase.thirdparty.io.netty.util.concurrent.GlobalEventExecutor;
/**
@@ -67,14 +64,6 @@ import org.apache.hbase.thirdparty.io.netty.util.concurrent.GlobalEventExecutor;
public class NettyRpcServer extends RpcServer {
public static final Logger LOG = LoggerFactory.getLogger(NettyRpcServer.class);
/**
* Name of property to change netty rpc server eventloop thread count. Default is 0. Tests may set
* this down from unlimited.
*/
public static final String HBASE_NETTY_EVENTLOOP_RPCSERVER_THREADCOUNT_KEY =
"hbase.netty.eventloop.rpcserver.thread.count";
private static final int EVENTLOOP_THREADCOUNT_DEFAULT = 0;
/**
* Name of property to change the byte buf allocator for the netty channels. Default is no value,
* which causes us to use PooledByteBufAllocator. Valid settings here are "pooled", "unpooled",
@@ -103,21 +92,16 @@ public class NettyRpcServer extends RpcServer {
super(server, name, services, bindAddress, conf, scheduler, reservoirEnabled);
this.bindAddress = bindAddress;
this.channelAllocator = getChannelAllocator(conf);
EventLoopGroup eventLoopGroup;
Class<? extends ServerChannel> channelClass;
// Get the event loop group configuration from the server class if available.
NettyEventLoopGroupConfig config = null;
if (server instanceof HRegionServer) {
NettyEventLoopGroupConfig config = ((HRegionServer) server).getEventLoopGroupConfig();
eventLoopGroup = config.group();
channelClass = config.serverChannelClass();
} else {
int threadCount = server == null
? EVENTLOOP_THREADCOUNT_DEFAULT
: server.getConfiguration().getInt(HBASE_NETTY_EVENTLOOP_RPCSERVER_THREADCOUNT_KEY,
EVENTLOOP_THREADCOUNT_DEFAULT);
eventLoopGroup = new NioEventLoopGroup(threadCount,
new DefaultThreadFactory("NettyRpcServer", true, Thread.MAX_PRIORITY));
channelClass = NioServerSocketChannel.class;
config = ((HRegionServer) server).getEventLoopGroupConfig();
}
if (config == null) {
config = new NettyEventLoopGroupConfig(conf, "NettyRpcServer");
}
EventLoopGroup eventLoopGroup = config.group();
Class<? extends ServerChannel> channelClass = config.serverChannelClass();
ServerBootstrap bootstrap = new ServerBootstrap().group(eventLoopGroup).channel(channelClass)
.childOption(ChannelOption.TCP_NODELAY, tcpNoDelay)
.childOption(ChannelOption.SO_KEEPALIVE, tcpKeepAlive)

View File

@@ -37,6 +37,13 @@ import org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultThreadFactory
*/
@InterfaceAudience.Private
public class NettyEventLoopGroupConfig {
public static final String NETTY_WORKER_COUNT_KEY = "hbase.netty.worker.count";
public static final int DEFAULT_NETTY_WORKER_COUNT = 0;
public static final String NETTY_NATIVETRANSPORT_KEY = "hbase.netty.nativetransport";
public static final boolean DEFAULT_NETTY_NATIVETRANSPORT = true;
private final EventLoopGroup group;
private final Class<? extends ServerChannel> serverChannelClass;
@@ -45,14 +52,21 @@ public class NettyEventLoopGroupConfig {
private static boolean useEpoll(Configuration conf) {
// Config to enable native transport.
boolean epollEnabled = conf.getBoolean("hbase.netty.nativetransport", true);
// Use the faster native epoll transport mechanism on linux if enabled
return epollEnabled && JVM.isLinux() && JVM.isAmd64();
final boolean epollEnabled =
conf.getBoolean(NETTY_NATIVETRANSPORT_KEY, DEFAULT_NETTY_NATIVETRANSPORT);
// Use the faster native epoll transport mechanism on linux if enabled and the
// hardware architecture is either amd64 or aarch64. Netty is known to have native
// epoll support for these combinations.
return epollEnabled && JVM.isLinux() && (JVM.isAmd64() || JVM.isAarch64());
}
public NettyEventLoopGroupConfig(Configuration conf, String threadPoolName) {
boolean useEpoll = useEpoll(conf);
int workerCount = conf.getInt("hbase.netty.worker.count", 0);
final boolean useEpoll = useEpoll(conf);
final int workerCount = conf.getInt(NETTY_WORKER_COUNT_KEY,
// For backwards compatibility we also need to consider
// "hbase.netty.eventloop.rpcserver.thread.count"
// if it is defined in site configuration instead.
conf.getInt("hbase.netty.eventloop.rpcserver.thread.count", DEFAULT_NETTY_WORKER_COUNT));
ThreadFactory eventLoopThreadFactory =
new DefaultThreadFactory(threadPoolName, true, Thread.MAX_PRIORITY);
if (useEpoll) {

View File

@@ -277,9 +277,4 @@
<value>3</value>
<description>Default is unbounded</description>
</property>
<property>
<name>hbase.netty.eventloop.rpcserver.thread.count</name>
<value>3</value>
<description>Default is unbounded</description>
</property>
</configuration>