Log alloc description after netty processors set (#62741)

Currently we log the NettyAllocator description when the netty plugin is
created. Unfortunately, this hits certain static fields in Netty which
triggers the setting of the number of CPU processors. This conflicts
with our Elasticsearch behavior to override this based on a setting.

This commit resolves the issue by logging after the processors have been
set.
This commit is contained in:
Tim Brooks 2020-09-21 19:36:12 -06:00
parent 897d2e8a02
commit fae2f5f8e1
No known key found for this signature in database
GPG Key ID: C2AA3BB91A889E77
5 changed files with 16 additions and 9 deletions

View File

@ -150,6 +150,7 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
SharedGroupFactory sharedGroupFactory) {
super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings);
Netty4Utils.setAvailableProcessors(EsExecutors.NODE_PROCESSORS_SETTING.get(settings));
NettyAllocator.logAllocatorDescriptionIfNeeded();
this.sharedGroupFactory = sharedGroupFactory;
this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings);

View File

@ -19,8 +19,6 @@
package org.elasticsearch.transport;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@ -51,14 +49,8 @@ public class Netty4Plugin extends Plugin implements NetworkPlugin {
public static final String NETTY_TRANSPORT_NAME = "netty4";
public static final String NETTY_HTTP_TRANSPORT_NAME = "netty4";
private static final Logger logger = LogManager.getLogger(Netty4Plugin.class);
private final SetOnce<SharedGroupFactory> groupFactory = new SetOnce<>();
public Netty4Plugin() {
logger.info("creating NettyAllocator with the following configs: " + NettyAllocator.getAllocatorDescription());
}
@Override
public List<Setting<?>> getSettings() {
return Arrays.asList(

View File

@ -27,12 +27,19 @@ import io.netty.buffer.UnpooledByteBufAllocator;
import io.netty.channel.Channel;
import io.netty.channel.ServerChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.monitor.jvm.JvmInfo;
import java.util.concurrent.atomic.AtomicBoolean;
public class NettyAllocator {
private static final Logger logger = LogManager.getLogger(NettyAllocator.class);
private static final AtomicBoolean descriptionLogged = new AtomicBoolean(false);
private static final ByteBufAllocator ALLOCATOR;
private static final String DESCRIPTION;
@ -95,6 +102,12 @@ public class NettyAllocator {
}
}
public static void logAllocatorDescriptionIfNeeded() {
    // Only the first caller wins the atomic flip; every later call is a no-op,
    // so the allocator description is logged at most once per JVM.
    if (descriptionLogged.compareAndSet(false, true) == false) {
        return;
    }
    logger.info("creating NettyAllocator with the following configs: " + NettyAllocator.getAllocatorDescription());
}
public static ByteBufAllocator getAllocator() {
return ALLOCATOR;
}

View File

@ -104,6 +104,7 @@ public class Netty4Transport extends TcpTransport {
CircuitBreakerService circuitBreakerService, SharedGroupFactory sharedGroupFactory) {
super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService);
Netty4Utils.setAvailableProcessors(EsExecutors.NODE_PROCESSORS_SETTING.get(settings));
NettyAllocator.logAllocatorDescriptionIfNeeded();
this.sharedGroupFactory = sharedGroupFactory;
// See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one

View File

@ -38,7 +38,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
public class Netty4Utils {
private static AtomicBoolean isAvailableProcessorsSet = new AtomicBoolean();
private static final AtomicBoolean isAvailableProcessorsSet = new AtomicBoolean();
/**
* Set the number of available processors that Netty uses for sizing various resources (e.g., thread pools).