We never do any file IO or other blocking work on the transport threads, so there is no tangible benefit to using more IO threads than there are CPUs. With Netty in particular there are significant downsides to running more threads than necessary: since we use the default setting for `io.netty.allocator.useCacheForAllThreads`, which is `true`, each transport thread holds up to `16MB` of thread-local buffer cache. That means we potentially waste CPUs * 16MB of heap on unnecessary IO threads, on top of the obvious inefficiency of the extra context switches they introduce.
commit 14a042fbe5 (parent b718193a01)
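To make the heap overhead from the commit message concrete, here is a small illustrative sketch (not part of this change; the class name and the use of `Runtime.availableProcessors()` in place of `EsExecutors.allocatedProcessors(settings)` are assumptions) of how much per-thread cache the surplus workers of the old default could pin:

```java
// Illustrative sketch only: estimates the thread-local allocator cache that the
// extra IO threads of the old default (allocatedProcessors * 2) could hold.
public class WorkerCacheWaste {
    public static void main(String[] args) {
        int cpus = Runtime.getRuntime().availableProcessors(); // stand-in for EsExecutors.allocatedProcessors(settings)
        int oldWorkers = cpus * 2;            // previous default
        int newWorkers = cpus;                // new default
        long cachePerThreadBytes = 16L << 20; // up to ~16MB per transport thread with useCacheForAllThreads=true
        long potentialWasteBytes = (long) (oldWorkers - newWorkers) * cachePerThreadBytes;
        System.out.printf("%d surplus IO threads could pin up to %d MB of heap%n",
            oldWorkers - newWorkers, potentialWasteBytes >> 20);
    }
}
```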
```diff
@@ -78,7 +78,7 @@ public class Netty4Transport extends TcpTransport {

     public static final Setting<Integer> WORKER_COUNT =
         new Setting<>("transport.netty.worker_count",
-            (s) -> Integer.toString(EsExecutors.allocatedProcessors(s) * 2),
+            (s) -> Integer.toString(EsExecutors.allocatedProcessors(s)),
             (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), Property.NodeScope);

     public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting(
```
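For context, a minimal sketch of the idea behind the new default (assumed wiring, not copied from `Netty4Transport`): the worker count ultimately sizes the Netty event loop group that runs the transport IO, so tying it to the processor count yields one IO thread per CPU:

```java
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;

// Minimal sketch, assuming a plain Netty setup: size the worker event loop
// group to the processor count instead of 2x, matching the new default above.
public class TransportWorkerGroup {
    public static void main(String[] args) throws Exception {
        int workerCount = Runtime.getRuntime().availableProcessors(); // new default: one IO thread per CPU
        EventLoopGroup workers = new NioEventLoopGroup(workerCount);
        System.out.println("transport worker threads: " + workerCount);
        workers.shutdownGracefully().sync();
    }
}
```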
```diff
@@ -57,7 +57,7 @@ public class NioTransportPlugin extends Plugin implements NetworkPlugin {

     public static final Setting<Integer> NIO_WORKER_COUNT =
         new Setting<>("transport.nio.worker_count",
-            (s) -> Integer.toString(EsExecutors.allocatedProcessors(s) * 2),
+            (s) -> Integer.toString(EsExecutors.allocatedProcessors(s)),
             (s) -> Setting.parseInt(s, 1, "transport.nio.worker_count"), Setting.Property.NodeScope);
     public static final Setting<Integer> NIO_HTTP_WORKER_COUNT =
         intSetting("http.nio.worker_count", 0, 0, Setting.Property.NodeScope);
```
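The new defaults only apply when the setting is absent; an explicitly configured value still wins, so nodes with tuned worker counts are unaffected. A hedged usage sketch (the `NioTransportPlugin` import path is assumed and may differ between versions):

```java
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.transport.nio.NioTransportPlugin; // assumed package

// Usage sketch: an explicit transport.nio.worker_count overrides the
// CPU-based default introduced above.
public class WorkerCountOverride {
    public static void main(String[] args) {
        Settings explicit = Settings.builder()
            .put("transport.nio.worker_count", 8)
            .build();
        System.out.println(NioTransportPlugin.NIO_WORKER_COUNT.get(explicit));       // 8
        System.out.println(NioTransportPlugin.NIO_WORKER_COUNT.get(Settings.EMPTY)); // allocatedProcessors
    }
}
```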