Remove AbstractComponent from AbstractLifecycleComponent (#35560)

AbstractLifecycleComponent now no longer extends AbstractComponent. In
order to accomplish this, many, many classes now instantiate their own
logger.
This commit is contained in:
Gordon Brown 2018-11-19 09:51:32 -07:00 committed by GitHub
parent 156b3cae15
commit b2057138a7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
48 changed files with 152 additions and 25 deletions

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.repositories.url;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.cluster.metadata.RepositoryMetaData;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobPath;
@@ -50,6 +52,7 @@ import java.util.function.Function;
  * </dl>
  */
 public class URLRepository extends BlobStoreRepository {
+    private static final Logger logger = LogManager.getLogger(URLRepository.class);
     public static final String TYPE = "url";

View File

@@ -41,6 +41,8 @@ import io.netty.handler.codec.http.HttpResponseEncoder;
 import io.netty.handler.timeout.ReadTimeoutException;
 import io.netty.handler.timeout.ReadTimeoutHandler;
 import io.netty.util.AttributeKey;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.network.CloseableChannel;
@@ -92,6 +94,7 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MA
 import static org.elasticsearch.http.netty4.cors.Netty4CorsHandler.ANY_ORIGIN;
 public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
+    private static final Logger logger = LogManager.getLogger(Netty4HttpServerTransport.class);
     static {
         Netty4Utils.setup();
@@ -338,7 +341,7 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
             if (handlingSettings.isCorsEnabled()) {
                 ch.pipeline().addLast("cors", new Netty4CorsHandler(transport.corsConfig));
             }
-            ch.pipeline().addLast("pipelining", new Netty4HttpPipeliningHandler(transport.logger, transport.pipeliningMaxEvents));
+            ch.pipeline().addLast("pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents));
             ch.pipeline().addLast("handler", requestHandler);
             transport.serverAcceptedChannel(nettyHttpChannel);
         }

View File

@@ -36,6 +36,8 @@ import io.netty.channel.socket.nio.NioServerSocketChannel;
 import io.netty.channel.socket.nio.NioSocketChannel;
 import io.netty.util.AttributeKey;
 import io.netty.util.concurrent.Future;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;
@@ -73,6 +75,7 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF
  * sending out ping requests to other nodes.
  */
 public class Netty4Transport extends TcpTransport {
+    private static final Logger logger = LogManager.getLogger(Netty4Transport.class);
     static {
         Netty4Utils.setup();

View File

@@ -33,6 +33,8 @@ import com.microsoft.windowsazure.management.compute.ComputeManagementClient;
 import com.microsoft.windowsazure.management.compute.ComputeManagementService;
 import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse;
 import com.microsoft.windowsazure.management.configuration.ManagementConfiguration;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.SpecialPermission;
 import org.elasticsearch.cloud.azure.classic.AzureServiceRemoteException;
@@ -43,6 +45,8 @@ import org.elasticsearch.common.settings.Settings;
 public class AzureComputeServiceImpl extends AbstractLifecycleComponent
     implements AzureComputeService {
+    private static final Logger logger = LogManager.getLogger(AzureComputeServiceImpl.class);
     private final ComputeManagementClient client;
     private final String serviceName;

View File

@@ -30,12 +30,15 @@ import com.google.api.client.http.GenericUrl;
 import com.google.api.client.http.HttpHeaders;
 import com.google.api.client.http.HttpResponse;
 import com.google.api.client.http.HttpTransport;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.cloud.gce.util.Access;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 public class GceMetadataService extends AbstractLifecycleComponent {
+    private static final Logger logger = LogManager.getLogger(GceMetadataService.class);
     // Forcing Google Token API URL as set in GCE SDK to
     // http://metadata/computeMetadata/v1/instance/service-accounts/default/token

View File

@@ -22,6 +22,8 @@ package org.elasticsearch.repositories.azure;
 import com.microsoft.azure.storage.LocationMode;
 import com.microsoft.azure.storage.StorageException;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.metadata.RepositoryMetaData;
@@ -58,6 +60,7 @@ import static org.elasticsearch.repositories.azure.AzureStorageService.MIN_CHUNK
  * </dl>
  */
 public class AzureRepository extends BlobStoreRepository {
+    private static final Logger logger = LogManager.getLogger(AzureRepository.class);
     public static final String TYPE = "azure";

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.repositories.gcs;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.cluster.metadata.RepositoryMetaData;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.blobstore.BlobPath;
@@ -39,6 +41,7 @@ import static org.elasticsearch.common.settings.Setting.byteSizeSetting;
 import static org.elasticsearch.common.settings.Setting.simpleString;
 class GoogleCloudStorageRepository extends BlobStoreRepository {
+    private static final Logger logger = LogManager.getLogger(GoogleCloudStorageRepository.class);
     // package private for testing
     static final ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES);

View File

@@ -52,7 +52,7 @@ import java.util.Locale;
 public final class HdfsRepository extends BlobStoreRepository {
-    private static final Logger LOGGER = LogManager.getLogger(HdfsRepository.class);
+    private static final Logger logger = LogManager.getLogger(HdfsRepository.class);
     private static final String CONF_SECURITY_PRINCIPAL = "security.principal";
@@ -105,7 +105,7 @@ public final class HdfsRepository extends BlobStoreRepository {
         final Settings confSettings = repositorySettings.getByPrefix("conf.");
         for (String key : confSettings.keySet()) {
-            LOGGER.debug("Adding configuration to HDFS Client Configuration : {} = {}", key, confSettings.get(key));
+            logger.debug("Adding configuration to HDFS Client Configuration : {} = {}", key, confSettings.get(key));
             hadoopConfiguration.set(key, confSettings.get(key));
         }
@@ -161,7 +161,7 @@ public final class HdfsRepository extends BlobStoreRepository {
         // Check to see if the authentication method is compatible
         if (kerberosPrincipal != null && authMethod.equals(AuthenticationMethod.SIMPLE)) {
-            LOGGER.warn("Hadoop authentication method is set to [SIMPLE], but a Kerberos principal is " +
+            logger.warn("Hadoop authentication method is set to [SIMPLE], but a Kerberos principal is " +
                 "specified. Continuing with [KERBEROS] authentication.");
             SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, hadoopConfiguration);
         } else if (kerberosPrincipal == null && authMethod.equals(AuthenticationMethod.KERBEROS)) {
@@ -174,15 +174,15 @@ public final class HdfsRepository extends BlobStoreRepository {
         UserGroupInformation.setConfiguration(hadoopConfiguration);
         // Debugging
-        LOGGER.debug("Hadoop security enabled: [{}]", UserGroupInformation.isSecurityEnabled());
-        LOGGER.debug("Using Hadoop authentication method: [{}]", SecurityUtil.getAuthenticationMethod(hadoopConfiguration));
+        logger.debug("Hadoop security enabled: [{}]", UserGroupInformation.isSecurityEnabled());
+        logger.debug("Using Hadoop authentication method: [{}]", SecurityUtil.getAuthenticationMethod(hadoopConfiguration));
         // UserGroupInformation (UGI) instance is just a Hadoop specific wrapper around a Java Subject
         try {
             if (UserGroupInformation.isSecurityEnabled()) {
                 String principal = preparePrincipal(kerberosPrincipal);
                 String keytab = HdfsSecurityContext.locateKeytabFile(environment).toString();
-                LOGGER.debug("Using kerberos principal [{}] and keytab located at [{}]", principal, keytab);
+                logger.debug("Using kerberos principal [{}] and keytab located at [{}]", principal, keytab);
                 return UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab);
             }
             return UserGroupInformation.getCurrentUser();
@@ -203,7 +203,7 @@ public final class HdfsRepository extends BlobStoreRepository {
         }
         if (originalPrincipal.equals(finalPrincipal) == false) {
-            LOGGER.debug("Found service principal. Converted original principal name [{}] to server principal [{}]",
+            logger.debug("Found service principal. Converted original principal name [{}] to server principal [{}]",
                 originalPrincipal, finalPrincipal);
         }
     }

View File

@@ -20,6 +20,7 @@
 package org.elasticsearch.http.nio;
 import io.netty.handler.codec.http.HttpMethod;
+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.Strings;
@@ -80,6 +81,7 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MA
 import static org.elasticsearch.http.nio.cors.NioCorsHandler.ANY_ORIGIN;
 public class NioHttpServerTransport extends AbstractHttpServerTransport {
+    private static final Logger logger = LogManager.getLogger(NioHttpServerTransport.class);
     public static final Setting<Integer> NIO_HTTP_ACCEPTOR_COUNT =
         intSetting("http.nio.acceptor_count", 1, 1, Setting.Property.NodeScope);

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.transport.nio;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -55,6 +57,7 @@ import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.new
 import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
 public class NioTransport extends TcpTransport {
+    private static final Logger logger = LogManager.getLogger(NioTransport.class);
     public static final Setting<Integer> NIO_WORKER_COUNT =
         new Setting<>("transport.nio.worker_count",

View File

@@ -18,6 +18,8 @@
  */
 package org.elasticsearch.cluster;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -54,6 +56,7 @@ import static org.elasticsearch.common.settings.Setting.positiveTimeSetting;
  * is done by {@link MasterFaultDetection}.
  */
 public class NodeConnectionsService extends AbstractLifecycleComponent {
+    private static final Logger logger = LogManager.getLogger(NodeConnectionsService.class);
     public static final Setting<TimeValue> CLUSTER_NODE_RECONNECT_INTERVAL_SETTING =
         positiveTimeSetting("cluster.nodes.reconnect_interval", TimeValue.timeValueSeconds(10), Property.NodeScope);

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.cluster.routing;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateListener;
@@ -52,6 +54,7 @@ import java.util.concurrent.atomic.AtomicReference;
  * another cluster change event.
  */
 public class DelayedAllocationService extends AbstractLifecycleComponent implements ClusterStateListener {
+    private static final Logger logger = LogManager.getLogger(DelayedAllocationService.class);
     static final String CLUSTER_UPDATE_TASK_SOURCE = "delayed_allocation_reroute";

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.cluster.routing;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterState;
@@ -45,6 +47,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
  * </p>
  */
 public class RoutingService extends AbstractLifecycleComponent {
+    private static final Logger logger = LogManager.getLogger(RoutingService.class);
     private static final String CLUSTER_UPDATE_TASK_SOURCE = "cluster_reroute";

View File

@@ -19,6 +19,7 @@
 package org.elasticsearch.cluster.service;
+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.cluster.ClusterChangedEvent;
@@ -66,6 +67,7 @@ import static org.elasticsearch.cluster.service.ClusterService.CLUSTER_SERVICE_S
 import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
 public class ClusterApplierService extends AbstractLifecycleComponent implements ClusterApplier {
+    private static final Logger logger = LogManager.getLogger(ClusterApplierService.class);
     public static final String CLUSTER_UPDATE_THREAD_NAME = "clusterApplierService#updateTask";

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.cluster.service;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateApplier;
@@ -43,6 +45,7 @@ import java.util.Collections;
 import java.util.Map;
 public class ClusterService extends AbstractLifecycleComponent {
+    private static final Logger logger = LogManager.getLogger(ClusterService.class);
     private final MasterService masterService;

View File

@@ -67,6 +67,7 @@ import static org.elasticsearch.cluster.service.ClusterService.CLUSTER_SERVICE_S
 import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
 public class MasterService extends AbstractLifecycleComponent {
+    private static final Logger logger = LogManager.getLogger(MasterService.class);
     public static final String MASTER_UPDATE_THREAD_NAME = "masterService#updateTask";

View File

@@ -19,13 +19,16 @@
 package org.elasticsearch.common.component;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.common.settings.Settings;
 import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.CopyOnWriteArrayList;
-public abstract class AbstractLifecycleComponent extends AbstractComponent implements LifecycleComponent {
+public abstract class AbstractLifecycleComponent implements LifecycleComponent {
+    private static final Logger logger = LogManager.getLogger(AbstractLifecycleComponent.class);
     protected final Lifecycle lifecycle = new Lifecycle();

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.discovery.single;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterName;
@@ -46,6 +48,7 @@ import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK
  * A discovery implementation where the only member of the cluster is the local node.
  */
 public class SingleNodeDiscovery extends AbstractLifecycleComponent implements Discovery {
+    private static final Logger logger = LogManager.getLogger(SingleNodeDiscovery.class);
     private final ClusterName clusterName;
     protected final TransportService transportService;

View File

@@ -19,6 +19,7 @@
 package org.elasticsearch.discovery.zen;
+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.core.internal.io.IOUtils;
@@ -87,6 +88,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
 import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK;
 public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery, PingContextProvider, IncomingClusterStateListener {
+    private static final Logger logger = LogManager.getLogger(ZenDiscovery.class);
     public static final Setting<TimeValue> PING_TIMEOUT_SETTING =
         Setting.positiveTimeSetting("discovery.zen.ping_timeout", timeValueSeconds(3), Property.NodeScope);

View File

@@ -20,6 +20,8 @@
 package org.elasticsearch.gateway;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterState;
@@ -48,6 +50,7 @@ import org.elasticsearch.threadpool.ThreadPool;
 import java.util.concurrent.atomic.AtomicBoolean;
 public class GatewayService extends AbstractLifecycleComponent implements ClusterStateListener {
+    private static final Logger logger = LogManager.getLogger(GatewayService.class);
     public static final Setting<Integer> EXPECTED_NODES_SETTING =
         Setting.intSetting("gateway.expected_nodes", -1, -1, Property.NodeScope);

View File

@@ -21,6 +21,8 @@ package org.elasticsearch.http;
 import com.carrotsearch.hppc.IntHashSet;
 import com.carrotsearch.hppc.IntSet;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
@@ -63,6 +65,8 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_
 import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT;
 public abstract class AbstractHttpServerTransport extends AbstractLifecycleComponent implements HttpServerTransport {
+    private static final Logger logger = LogManager.getLogger(AbstractHttpServerTransport.class);
     protected final Settings settings;
     public final HttpHandlingSettings handlingSettings;
     protected final NetworkService networkService;

View File

@@ -19,6 +19,7 @@
 package org.elasticsearch.indices;
+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.lucene.index.DirectoryReader;
@@ -154,6 +155,7 @@ import static org.elasticsearch.index.query.AbstractQueryBuilder.parseInnerQuery
 public class IndicesService extends AbstractLifecycleComponent
     implements IndicesClusterStateService.AllocatedIndices<IndexShard, IndexService>, IndexService.ShardStoreDeleter {
+    private static final Logger logger = LogManager.getLogger(IndicesService.class);
     public static final String INDICES_SHARDS_CLOSED_TIMEOUT = "indices.shards_closed_timeout";
     public static final Setting<TimeValue> INDICES_CACHE_CLEAN_INTERVAL_SETTING =

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.indices.breaker;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.common.breaker.CircuitBreaker;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.settings.Settings;
@@ -28,6 +30,7 @@ import org.elasticsearch.common.settings.Settings;
  * that load field data.
  */
 public abstract class CircuitBreakerService extends AbstractLifecycleComponent {
+    private static final Logger logger = LogManager.getLogger(CircuitBreakerService.class);
     protected CircuitBreakerService(Settings settings) {
         super(settings);

View File

@@ -20,6 +20,7 @@
 package org.elasticsearch.indices.breaker;
 import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.common.breaker.ChildMemoryCircuitBreaker;
 import org.elasticsearch.common.breaker.CircuitBreaker;
 import org.elasticsearch.common.breaker.CircuitBreakingException;
@@ -44,6 +45,7 @@ import java.util.stream.Collectors;
  * if tripped
  */
 public class HierarchyCircuitBreakerService extends CircuitBreakerService {
+    private static final Logger logger = LogManager.getLogger(HierarchyCircuitBreakerService.class);
     private static final String CHILD_LOGGER_PREFIX = "org.elasticsearch.indices.breaker.";

View File

@ -19,6 +19,7 @@
package org.elasticsearch.indices.cluster; package org.elasticsearch.indices.cluster;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.LockObtainFailedException;
@ -96,6 +97,7 @@ import static org.elasticsearch.indices.cluster.IndicesClusterStateService.Alloc
import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED; import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED;
public class IndicesClusterStateService extends AbstractLifecycleComponent implements ClusterStateApplier { public class IndicesClusterStateService extends AbstractLifecycleComponent implements ClusterStateApplier {
private static final Logger logger = LogManager.getLogger(IndicesClusterStateService.class);
final AllocatedIndices<? extends Shard, ? extends AllocatedIndex<? extends Shard>> indicesService; final AllocatedIndices<? extends Shard, ? extends AllocatedIndex<? extends Shard>> indicesService;
private final ClusterService clusterService; private final ClusterService clusterService;

View File

@ -19,6 +19,7 @@
package org.elasticsearch.monitor.jvm; package org.elasticsearch.monitor.jvm;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting;
@ -41,6 +42,7 @@ import java.util.function.BiFunction;
import static java.util.Collections.unmodifiableMap; import static java.util.Collections.unmodifiableMap;
public class JvmGcMonitorService extends AbstractLifecycleComponent { public class JvmGcMonitorService extends AbstractLifecycleComponent {
private static final Logger logger = LogManager.getLogger(JvmGcMonitorService.class);
private final ThreadPool threadPool; private final ThreadPool threadPool;
private final boolean enabled; private final boolean enabled;

View File

@ -19,6 +19,8 @@
package org.elasticsearch.repositories.blobstore; package org.elasticsearch.repositories.blobstore;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexCommit;
@ -169,6 +171,7 @@ import static org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSna
* </pre> * </pre>
*/ */
public abstract class BlobStoreRepository extends AbstractLifecycleComponent implements Repository { public abstract class BlobStoreRepository extends AbstractLifecycleComponent implements Repository {
private static final Logger logger = LogManager.getLogger(BlobStoreRepository.class);
protected final RepositoryMetaData metadata; protected final RepositoryMetaData metadata;

View File

@ -19,6 +19,8 @@
package org.elasticsearch.repositories.fs; package org.elasticsearch.repositories.fs;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.BlobStore;
@ -46,6 +48,7 @@ import java.util.function.Function;
* </dl> * </dl>
*/ */
public class FsRepository extends BlobStoreRepository { public class FsRepository extends BlobStoreRepository {
private static final Logger logger = LogManager.getLogger(FsRepository.class);
public static final String TYPE = "fs"; public static final String TYPE = "fs";

View File

@ -19,6 +19,8 @@
package org.elasticsearch.search; package org.elasticsearch.search;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocs;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
@ -119,6 +121,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes; import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
public class SearchService extends AbstractLifecycleComponent implements IndexEventListener { public class SearchService extends AbstractLifecycleComponent implements IndexEventListener {
private static final Logger logger = LogManager.getLogger(SearchService.class);
// we can have 5 minutes here, since we make sure to clean with search requests and when shard/index closes // we can have 5 minutes here, since we make sure to clean with search requests and when shard/index closes
public static final Setting<TimeValue> DEFAULT_KEEPALIVE_SETTING = public static final Setting<TimeValue> DEFAULT_KEEPALIVE_SETTING =

View File

@ -20,6 +20,8 @@
package org.elasticsearch.snapshots; package org.elasticsearch.snapshots;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.util.SetOnce; import org.apache.lucene.util.SetOnce;
import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ExceptionsHelper;
@ -90,6 +92,7 @@ import static org.elasticsearch.transport.EmptyTransportResponseHandler.INSTANCE
* starting and stopping shard level snapshots * starting and stopping shard level snapshots
*/ */
public class SnapshotShardsService extends AbstractLifecycleComponent implements ClusterStateListener, IndexEventListener { public class SnapshotShardsService extends AbstractLifecycleComponent implements ClusterStateListener, IndexEventListener {
private static final Logger logger = LogManager.getLogger(SnapshotShardsService.class);
public static final String UPDATE_SNAPSHOT_STATUS_ACTION_NAME = "internal:cluster/snapshot/update_snapshot_status"; public static final String UPDATE_SNAPSHOT_STATUS_ACTION_NAME = "internal:cluster/snapshot/update_snapshot_status";

View File

@ -21,6 +21,8 @@ package org.elasticsearch.snapshots;
import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ExceptionsHelper;
@ -102,6 +104,7 @@ import static org.elasticsearch.cluster.SnapshotsInProgress.completed;
* </ul> * </ul>
*/ */
public class SnapshotsService extends AbstractLifecycleComponent implements ClusterStateApplier { public class SnapshotsService extends AbstractLifecycleComponent implements ClusterStateApplier {
private static final Logger logger = LogManager.getLogger(SnapshotsService.class);
private final ClusterService clusterService; private final ClusterService clusterService;

View File

@ -20,6 +20,8 @@ package org.elasticsearch.transport;
import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.hppc.IntSet; import com.carrotsearch.hppc.IntSet;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version; import org.elasticsearch.Version;
@ -114,6 +116,7 @@ import static org.elasticsearch.common.transport.NetworkExceptionHelper.isConnec
import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap;
public abstract class TcpTransport extends AbstractLifecycleComponent implements Transport { public abstract class TcpTransport extends AbstractLifecycleComponent implements Transport {
private static final Logger logger = LogManager.getLogger(TcpTransport.class);
public static final String TRANSPORT_WORKER_THREAD_NAME_PREFIX = "transport_worker"; public static final String TRANSPORT_WORKER_THREAD_NAME_PREFIX = "transport_worker";

View File

@ -19,6 +19,7 @@
package org.elasticsearch.transport; package org.elasticsearch.transport;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.Version; import org.elasticsearch.Version;
@ -80,6 +81,7 @@ import static org.elasticsearch.common.settings.Setting.listSetting;
import static org.elasticsearch.common.settings.Setting.timeSetting; import static org.elasticsearch.common.settings.Setting.timeSetting;
public class TransportService extends AbstractLifecycleComponent implements TransportMessageListener, TransportConnectionListener { public class TransportService extends AbstractLifecycleComponent implements TransportMessageListener, TransportConnectionListener {
private static final Logger logger = LogManager.getLogger(TransportService.class);
public static final Setting<Integer> CONNECTIONS_PER_NODE_RECOVERY = public static final Setting<Integer> CONNECTIONS_PER_NODE_RECOVERY =
intSetting("transport.connections_per_node.recovery", 2, 1, Setting.Property.NodeScope); intSetting("transport.connections_per_node.recovery", 2, 1, Setting.Property.NodeScope);

View File

@ -18,6 +18,8 @@
*/ */
package org.elasticsearch.watcher; package org.elasticsearch.watcher;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting;
@ -41,6 +43,7 @@ import java.util.concurrent.CopyOnWriteArraySet;
* defaults to {@code 60s}. The service can be disabled by setting {@code resource.reload.enabled} setting to {@code false}. * defaults to {@code 60s}. The service can be disabled by setting {@code resource.reload.enabled} setting to {@code false}.
*/ */
public class ResourceWatcherService extends AbstractLifecycleComponent { public class ResourceWatcherService extends AbstractLifecycleComponent {
private static final Logger logger = LogManager.getLogger(ResourceWatcherService.class);
public enum Frequency { public enum Frequency {

View File

@ -119,13 +119,13 @@ public class ClusterApplierServiceTests extends ESTestCase {
mockAppender.addExpectation( mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation( new MockLogAppender.SeenEventExpectation(
"test1", "test1",
clusterApplierService.getClass().getCanonicalName(), ClusterApplierService.class.getCanonicalName(),
Level.DEBUG, Level.DEBUG,
"*processing [test1]: took [1s] no change in cluster state")); "*processing [test1]: took [1s] no change in cluster state"));
mockAppender.addExpectation( mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation( new MockLogAppender.SeenEventExpectation(
"test2", "test2",
clusterApplierService.getClass().getCanonicalName(), ClusterApplierService.class.getCanonicalName(),
Level.TRACE, Level.TRACE,
"*failed to execute cluster state applier in [2s]*")); "*failed to execute cluster state applier in [2s]*"));
mockAppender.addExpectation( mockAppender.addExpectation(
@ -135,7 +135,7 @@ public class ClusterApplierServiceTests extends ESTestCase {
Level.DEBUG, Level.DEBUG,
"*processing [test3]: took [0s] no change in cluster state*")); "*processing [test3]: took [0s] no change in cluster state*"));
Logger clusterLogger = LogManager.getLogger("org.elasticsearch.cluster.service"); Logger clusterLogger = LogManager.getLogger(ClusterApplierService.class);
Loggers.addAppender(clusterLogger, mockAppender); Loggers.addAppender(clusterLogger, mockAppender);
try { try {
clusterApplierService.currentTimeOverride = System.nanoTime(); clusterApplierService.currentTimeOverride = System.nanoTime();
@ -190,23 +190,23 @@ public class ClusterApplierServiceTests extends ESTestCase {
mockAppender.addExpectation( mockAppender.addExpectation(
new MockLogAppender.UnseenEventExpectation( new MockLogAppender.UnseenEventExpectation(
"test1 shouldn't see because setting is too low", "test1 shouldn't see because setting is too low",
clusterApplierService.getClass().getCanonicalName(), ClusterApplierService.class.getCanonicalName(),
Level.WARN, Level.WARN,
"*cluster state applier task [test1] took [*] above the warn threshold of *")); "*cluster state applier task [test1] took [*] above the warn threshold of *"));
mockAppender.addExpectation( mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation( new MockLogAppender.SeenEventExpectation(
"test2", "test2",
clusterApplierService.getClass().getCanonicalName(), ClusterApplierService.class.getCanonicalName(),
Level.WARN, Level.WARN,
"*cluster state applier task [test2] took [32s] above the warn threshold of *")); "*cluster state applier task [test2] took [32s] above the warn threshold of *"));
mockAppender.addExpectation( mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation( new MockLogAppender.SeenEventExpectation(
"test4", "test4",
clusterApplierService.getClass().getCanonicalName(), ClusterApplierService.class.getCanonicalName(),
Level.WARN, Level.WARN,
"*cluster state applier task [test3] took [34s] above the warn threshold of *")); "*cluster state applier task [test3] took [34s] above the warn threshold of *"));
Logger clusterLogger = LogManager.getLogger("org.elasticsearch.cluster.service"); Logger clusterLogger = LogManager.getLogger(ClusterApplierService.class);
Loggers.addAppender(clusterLogger, mockAppender); Loggers.addAppender(clusterLogger, mockAppender);
try { try {
final CountDownLatch latch = new CountDownLatch(4); final CountDownLatch latch = new CountDownLatch(4);

View File

@ -310,23 +310,23 @@ public class MasterServiceTests extends ESTestCase {
mockAppender.addExpectation( mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation( new MockLogAppender.SeenEventExpectation(
"test1", "test1",
masterService.getClass().getCanonicalName(), MasterService.class.getCanonicalName(),
Level.DEBUG, Level.DEBUG,
"*processing [test1]: took [1s] no change in cluster state")); "*processing [test1]: took [1s] no change in cluster state"));
mockAppender.addExpectation( mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation( new MockLogAppender.SeenEventExpectation(
"test2", "test2",
masterService.getClass().getCanonicalName(), MasterService.class.getCanonicalName(),
Level.TRACE, Level.TRACE,
"*failed to execute cluster state update in [2s]*")); "*failed to execute cluster state update in [2s]*"));
mockAppender.addExpectation( mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation( new MockLogAppender.SeenEventExpectation(
"test3", "test3",
masterService.getClass().getCanonicalName(), MasterService.class.getCanonicalName(),
Level.DEBUG, Level.DEBUG,
"*processing [test3]: took [3s] done publishing updated cluster state (version: *, uuid: *)")); "*processing [test3]: took [3s] done publishing updated cluster state (version: *, uuid: *)"));
Logger clusterLogger = LogManager.getLogger(masterService.getClass().getPackage().getName()); Logger clusterLogger = LogManager.getLogger(MasterService.class);
Loggers.addAppender(clusterLogger, mockAppender); Loggers.addAppender(clusterLogger, mockAppender);
try { try {
final CountDownLatch latch = new CountDownLatch(4); final CountDownLatch latch = new CountDownLatch(4);
@ -651,29 +651,29 @@ public class MasterServiceTests extends ESTestCase {
mockAppender.addExpectation( mockAppender.addExpectation(
new MockLogAppender.UnseenEventExpectation( new MockLogAppender.UnseenEventExpectation(
"test1 shouldn't see because setting is too low", "test1 shouldn't see because setting is too low",
masterService.getClass().getCanonicalName(), MasterService.class.getCanonicalName(),
Level.WARN, Level.WARN,
"*cluster state update task [test1] took [*] above the warn threshold of *")); "*cluster state update task [test1] took [*] above the warn threshold of *"));
mockAppender.addExpectation( mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation( new MockLogAppender.SeenEventExpectation(
"test2", "test2",
masterService.getClass().getCanonicalName(), MasterService.class.getCanonicalName(),
Level.WARN, Level.WARN,
"*cluster state update task [test2] took [32s] above the warn threshold of *")); "*cluster state update task [test2] took [32s] above the warn threshold of *"));
mockAppender.addExpectation( mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation( new MockLogAppender.SeenEventExpectation(
"test3", "test3",
masterService.getClass().getCanonicalName(), MasterService.class.getCanonicalName(),
Level.WARN, Level.WARN,
"*cluster state update task [test3] took [33s] above the warn threshold of *")); "*cluster state update task [test3] took [33s] above the warn threshold of *"));
mockAppender.addExpectation( mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation( new MockLogAppender.SeenEventExpectation(
"test4", "test4",
masterService.getClass().getCanonicalName(), MasterService.class.getCanonicalName(),
Level.WARN, Level.WARN,
"*cluster state update task [test4] took [34s] above the warn threshold of *")); "*cluster state update task [test4] took [34s] above the warn threshold of *"));
Logger clusterLogger = LogManager.getLogger(masterService.getClass().getPackage().getName()); Logger clusterLogger = LogManager.getLogger(MasterService.class);
Loggers.addAppender(clusterLogger, mockAppender); Loggers.addAppender(clusterLogger, mockAppender);
try { try {
final CountDownLatch latch = new CountDownLatch(5); final CountDownLatch latch = new CountDownLatch(5);

View File

@ -20,6 +20,8 @@
package org.elasticsearch.snapshots.mockstore; package org.elasticsearch.snapshots.mockstore;
import com.carrotsearch.randomizedtesting.RandomizedContext; import com.carrotsearch.randomizedtesting.RandomizedContext;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.CorruptIndexException;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaData;
@ -57,6 +59,7 @@ import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicLong;
public class MockRepository extends FsRepository { public class MockRepository extends FsRepository {
private static final Logger logger = LogManager.getLogger(MockRepository.class);
public static class Plugin extends org.elasticsearch.plugins.Plugin implements RepositoryPlugin { public static class Plugin extends org.elasticsearch.plugins.Plugin implements RepositoryPlugin {

View File

@ -20,6 +20,8 @@
package org.elasticsearch.test.transport; package org.elasticsearch.test.transport;
import com.carrotsearch.randomizedtesting.SysGlobals; import com.carrotsearch.randomizedtesting.SysGlobals;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterModule;
@ -83,6 +85,7 @@ import java.util.function.Supplier;
* fake DiscoveryNode instances where the publish address is one of the bound addresses). * fake DiscoveryNode instances where the publish address is one of the bound addresses).
*/ */
public final class MockTransportService extends TransportService { public final class MockTransportService extends TransportService {
private static final Logger logger = LogManager.getLogger(MockTransportService.class);
private final Map<DiscoveryNode, List<Transport.Connection>> openConnections = new HashMap<>(); private final Map<DiscoveryNode, List<Transport.Connection>> openConnections = new HashMap<>();
private static final int JVM_ORDINAL = Integer.parseInt(System.getProperty(SysGlobals.CHILDVM_SYSPROP_JVM_ID, "0")); private static final int JVM_ORDINAL = Integer.parseInt(System.getProperty(SysGlobals.CHILDVM_SYSPROP_JVM_ID, "0"));

View File

@ -18,6 +18,8 @@
*/ */
package org.elasticsearch.transport; package org.elasticsearch.transport;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cli.SuppressForbidden; import org.elasticsearch.cli.SuppressForbidden;
@ -70,6 +72,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
* the networking layer in the worst possible way since it blocks and uses a thread per request model. * the networking layer in the worst possible way since it blocks and uses a thread per request model.
*/ */
public class MockTcpTransport extends TcpTransport { public class MockTcpTransport extends TcpTransport {
private static final Logger logger = LogManager.getLogger(MockTcpTransport.class);
/** /**
* A pre-built light connection profile that shares a single connection across all * A pre-built light connection profile that shares a single connection across all

View File

@ -19,6 +19,8 @@
package org.elasticsearch.transport.nio; package org.elasticsearch.transport.nio;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version; import org.elasticsearch.Version;
@ -64,6 +66,7 @@ import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.new
import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
public class MockNioTransport extends TcpTransport { public class MockNioTransport extends TcpTransport {
private static final Logger logger = LogManager.getLogger(MockNioTransport.class);
private final PageCacheRecycler pageCacheRecycler; private final PageCacheRecycler pageCacheRecycler;
private final ConcurrentMap<String, MockTcpChannelFactory> profileToChannelFactory = newConcurrentMap(); private final ConcurrentMap<String, MockTcpChannelFactory> profileToChannelFactory = newConcurrentMap();

View File

@ -5,6 +5,8 @@
*/ */
package org.elasticsearch.license; package org.elasticsearch.license;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
@ -56,6 +58,7 @@ import java.util.concurrent.atomic.AtomicReference;
* the license changes are detected in the cluster state. * the license changes are detected in the cluster state.
*/ */
public class LicenseService extends AbstractLifecycleComponent implements ClusterStateListener, SchedulerEngine.Listener { public class LicenseService extends AbstractLifecycleComponent implements ClusterStateListener, SchedulerEngine.Listener {
private static final Logger logger = LogManager.getLogger(LicenseService.class);
public static final Setting<String> SELF_GENERATED_LICENSE_TYPE = new Setting<>("xpack.license.self_generated.type", public static final Setting<String> SELF_GENERATED_LICENSE_TYPE = new Setting<>("xpack.license.self_generated.type",
(s) -> "basic", (s) -> { (s) -> "basic", (s) -> {

View File

@ -11,6 +11,8 @@ import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelOutboundHandlerAdapter; import io.netty.channel.ChannelOutboundHandlerAdapter;
import io.netty.channel.ChannelPromise; import io.netty.channel.ChannelPromise;
import io.netty.handler.ssl.SslHandler; import io.netty.handler.ssl.SslHandler;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
@ -47,6 +49,7 @@ import static org.elasticsearch.xpack.core.security.SecurityField.setting;
* Implementation of a transport that extends the {@link Netty4Transport} to add SSL and IP Filtering * Implementation of a transport that extends the {@link Netty4Transport} to add SSL and IP Filtering
*/ */
public class SecurityNetty4Transport extends Netty4Transport { public class SecurityNetty4Transport extends Netty4Transport {
private static final Logger logger = LogManager.getLogger(SecurityNetty4Transport.class);
private final SSLService sslService; private final SSLService sslService;
private final SSLConfiguration sslConfiguration; private final SSLConfiguration sslConfiguration;

View File

@ -5,6 +5,8 @@
*/ */
package org.elasticsearch.xpack.monitoring; package org.elasticsearch.xpack.monitoring;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier; import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
@ -37,6 +39,8 @@ import java.util.concurrent.atomic.AtomicBoolean;
* service life cycles, the intended way to temporarily stop the publishing is using the start and stop methods. * service life cycles, the intended way to temporarily stop the publishing is using the start and stop methods.
*/ */
public class MonitoringService extends AbstractLifecycleComponent { public class MonitoringService extends AbstractLifecycleComponent {
private static final Logger logger = LogManager.getLogger(MonitoringService.class);
/** /**
* Minimum value for sampling interval (1 second) * Minimum value for sampling interval (1 second)

View File

@ -5,6 +5,8 @@
*/ */
package org.elasticsearch.xpack.monitoring.cleaner; package org.elasticsearch.xpack.monitoring.cleaner;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
@ -26,6 +28,7 @@ import java.util.concurrent.ScheduledFuture;
* {@code CleanerService} takes care of deleting old monitoring indices. * {@code CleanerService} takes care of deleting old monitoring indices.
*/ */
public class CleanerService extends AbstractLifecycleComponent { public class CleanerService extends AbstractLifecycleComponent {
private static final Logger logger = LogManager.getLogger(CleanerService.class);
private final XPackLicenseState licenseState; private final XPackLicenseState licenseState;
private final ThreadPool threadPool; private final ThreadPool threadPool;

View File

@ -5,6 +5,7 @@
*/ */
package org.elasticsearch.xpack.monitoring.exporter; package org.elasticsearch.xpack.monitoring.exporter;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier; import org.apache.logging.log4j.util.Supplier;
@ -36,6 +37,7 @@ import java.util.concurrent.atomic.AtomicReference;
import static java.util.Collections.emptyMap; import static java.util.Collections.emptyMap;
public class Exporters extends AbstractLifecycleComponent implements Iterable<Exporter> { public class Exporters extends AbstractLifecycleComponent implements Iterable<Exporter> {
private static final Logger logger = LogManager.getLogger(Exporters.class);
private final Settings settings; private final Settings settings;
private final Map<String, Exporter.Factory> factories; private final Map<String, Exporter.Factory> factories;

View File

@ -8,6 +8,8 @@ package org.elasticsearch.xpack.security.transport.netty4;
import io.netty.channel.Channel; import io.netty.channel.Channel;
import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandler;
import io.netty.handler.ssl.SslHandler; import io.netty.handler.ssl.SslHandler;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BigArrays;
@ -25,6 +27,7 @@ import javax.net.ssl.SSLEngine;
import static org.elasticsearch.xpack.core.XPackSettings.HTTP_SSL_ENABLED; import static org.elasticsearch.xpack.core.XPackSettings.HTTP_SSL_ENABLED;
public class SecurityNetty4HttpServerTransport extends Netty4HttpServerTransport { public class SecurityNetty4HttpServerTransport extends Netty4HttpServerTransport {
private static final Logger logger = LogManager.getLogger(SecurityNetty4HttpServerTransport.class);
private final SecurityHttpExceptionHandler securityExceptionHandler; private final SecurityHttpExceptionHandler securityExceptionHandler;
private final IPFilter ipFilter; private final IPFilter ipFilter;

View File

@ -5,6 +5,8 @@
*/ */
package org.elasticsearch.xpack.security.transport.nio; package org.elasticsearch.xpack.security.transport.nio;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
@ -40,6 +42,7 @@ import java.util.function.Supplier;
import static org.elasticsearch.xpack.core.XPackSettings.HTTP_SSL_ENABLED; import static org.elasticsearch.xpack.core.XPackSettings.HTTP_SSL_ENABLED;
public class SecurityNioHttpServerTransport extends NioHttpServerTransport { public class SecurityNioHttpServerTransport extends NioHttpServerTransport {
private static final Logger logger = LogManager.getLogger(SecurityNioHttpServerTransport.class);
private final SecurityHttpExceptionHandler securityExceptionHandler; private final SecurityHttpExceptionHandler securityExceptionHandler;
private final IPFilter ipFilter; private final IPFilter ipFilter;

View File

@ -5,6 +5,8 @@
*/ */
package org.elasticsearch.xpack.security.transport.nio; package org.elasticsearch.xpack.security.transport.nio;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
@ -59,6 +61,7 @@ import static org.elasticsearch.xpack.core.security.SecurityField.setting;
* implementation of the SSL/TLS layer is in the {@link SSLChannelContext} and {@link SSLDriver} classes. * implementation of the SSL/TLS layer is in the {@link SSLChannelContext} and {@link SSLDriver} classes.
*/ */
public class SecurityNioTransport extends NioTransport { public class SecurityNioTransport extends NioTransport {
private static final Logger logger = LogManager.getLogger(SecurityNioTransport.class);
private final IPFilter authenticator; private final IPFilter authenticator;
private final SSLService sslService; private final SSLService sslService;