Introduce Log4j 2

This commit introduces Log4j 2 to the stack.
Jason Tedor 2016-08-09 13:34:23 -04:00
parent 5a8f2d7fb3
commit 7da0cdec42
367 changed files with 2906 additions and 2651 deletions
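The pattern repeated throughout the diffs below is twofold: the Elasticsearch ESLogger type gives way to org.apache.logging.log4j.Logger, and call sites that used to pass the exception before the format arguments (the Log4j 1 era convention) now wrap the format string and its arguments in a ParameterizedMessage and pass the Throwable last. A minimal before/after sketch; the Example class and the LogManager.getLogger lookup are illustrative, while the code in this commit obtains loggers through Elasticsearch's Loggers and ESLoggerFactory helpers:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;

public class Example {
    private static final Logger logger = LogManager.getLogger(Example.class);

    void onFailure(String source, Exception e) {
        // Before: logger.debug("failed to perform [{}]", e, source);
        // After: the message is constructed explicitly and the exception
        // moves to the final parameter, matching the Log4j 2 signatures.
        logger.debug(new ParameterizedMessage("failed to perform [{}]", source), e);
    }
}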

View File

@ -97,8 +97,8 @@ public class PluginBuildPlugin extends BuildPlugin {
// with a full elasticsearch server that includes optional deps
provided "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}"
provided "com.vividsolutions:jts:${project.versions.jts}"
provided "log4j:log4j:${project.versions.log4j}"
provided "log4j:apache-log4j-extras:${project.versions.log4j}"
provided "org.apache.logging.log4j:log4j-api:${project.versions.log4j}"
provided "org.apache.logging.log4j:log4j-core:${project.versions.log4j}"
provided "net.java.dev.jna:jna:${project.versions.jna}"
}
}

View File

@ -6,7 +6,7 @@ spatial4j = 0.6
jts = 1.13
jackson = 2.8.1
snakeyaml = 1.15
log4j = 1.2.17
log4j = 2.6.2
slf4j = 1.6.2
jna = 4.2.2

View File

@ -18,13 +18,13 @@
*/
package org.elasticsearch.client.benchmark.ops.bulk;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.client.benchmark.BenchmarkTask;
import org.elasticsearch.client.benchmark.metrics.Sample;
import org.elasticsearch.client.benchmark.metrics.SampleRecorder;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import java.io.BufferedReader;
@ -135,7 +135,7 @@ public class BulkBenchmarkTask implements BenchmarkTask {
private static final class BulkIndexer implements Runnable {
private static final ESLogger logger = ESLoggerFactory.getLogger(BulkIndexer.class.getName());
private static final Logger logger = ESLoggerFactory.getLogger(BulkIndexer.class.getName());
private final BlockingQueue<List<String>> bulkData;
private final int warmupIterations;

View File

@ -85,8 +85,10 @@ dependencies {
compile "com.vividsolutions:jts:${versions.jts}", optional
// logging
compile "log4j:log4j:${versions.log4j}", optional
compile "log4j:apache-log4j-extras:${versions.log4j}", optional
compile "org.apache.logging.log4j:log4j-api:${versions.log4j}", optional
compile "org.apache.logging.log4j:log4j-core:${versions.log4j}", optional
// to bridge dependencies that are still on Log4j 1 to Log4j 2
compile "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}", optional
compile "net.java.dev.jna:jna:${versions.jna}"
@ -156,21 +158,53 @@ thirdPartyAudit.excludes = [
// from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml)
'com.fasterxml.jackson.databind.ObjectMapper',
// from org.apache.log4j.receivers.net.JMSReceiver (log4j-extras)
// from log4j
'com.fasterxml.jackson.annotation.JsonInclude$Include',
'com.fasterxml.jackson.databind.DeserializationContext',
'com.fasterxml.jackson.databind.JsonMappingException',
'com.fasterxml.jackson.databind.JsonNode',
'com.fasterxml.jackson.databind.Module$SetupContext',
'com.fasterxml.jackson.databind.ObjectReader',
'com.fasterxml.jackson.databind.ObjectWriter',
'com.fasterxml.jackson.databind.SerializerProvider',
'com.fasterxml.jackson.databind.deser.std.StdDeserializer',
'com.fasterxml.jackson.databind.deser.std.StdScalarDeserializer',
'com.fasterxml.jackson.databind.module.SimpleModule',
'com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter',
'com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider',
'com.fasterxml.jackson.databind.ser.std.StdScalarSerializer',
'com.fasterxml.jackson.databind.ser.std.StdSerializer',
'com.fasterxml.jackson.dataformat.xml.JacksonXmlModule',
'com.fasterxml.jackson.dataformat.xml.XmlMapper',
'com.fasterxml.jackson.dataformat.xml.util.DefaultXmlPrettyPrinter',
'com.lmax.disruptor.BlockingWaitStrategy',
'com.lmax.disruptor.BusySpinWaitStrategy',
'com.lmax.disruptor.EventFactory',
'com.lmax.disruptor.EventTranslator',
'com.lmax.disruptor.EventTranslatorTwoArg',
'com.lmax.disruptor.EventTranslatorVararg',
'com.lmax.disruptor.ExceptionHandler',
'com.lmax.disruptor.LifecycleAware',
'com.lmax.disruptor.RingBuffer',
'com.lmax.disruptor.Sequence',
'com.lmax.disruptor.SequenceReportingEventHandler',
'com.lmax.disruptor.SleepingWaitStrategy',
'com.lmax.disruptor.TimeoutBlockingWaitStrategy',
'com.lmax.disruptor.WaitStrategy',
'com.lmax.disruptor.YieldingWaitStrategy',
'com.lmax.disruptor.dsl.Disruptor',
'com.lmax.disruptor.dsl.ProducerType',
'javax.jms.Connection',
'javax.jms.ConnectionFactory',
'javax.jms.Destination',
'javax.jms.Message',
'javax.jms.MessageConsumer',
'javax.jms.MessageListener',
'javax.jms.MessageProducer',
'javax.jms.ObjectMessage',
'javax.jms.TopicConnection',
'javax.jms.TopicConnectionFactory',
'javax.jms.TopicPublisher',
'javax.jms.TopicSession',
'javax.jms.TopicSubscriber',
// from org.apache.log4j.net.SMTPAppender (log4j)
'javax.jms.Session',
'javax.mail.Authenticator',
'javax.mail.Message$RecipientType',
'javax.mail.Message',
'javax.mail.Multipart',
'javax.mail.PasswordAuthentication',
'javax.mail.Session',
'javax.mail.Transport',
@ -180,6 +214,36 @@ thirdPartyAudit.excludes = [
'javax.mail.internet.MimeMessage',
'javax.mail.internet.MimeMultipart',
'javax.mail.internet.MimeUtility',
'javax.mail.util.ByteArrayDataSource',
'javax.persistence.AttributeConverter',
'javax.persistence.EntityManager',
'javax.persistence.EntityManagerFactory',
'javax.persistence.EntityTransaction',
'javax.persistence.Persistence',
'javax.persistence.PersistenceException',
'org.apache.commons.compress.compressors.CompressorStreamFactory',
'org.apache.commons.compress.utils.IOUtils',
'org.apache.commons.csv.CSVFormat',
'org.apache.commons.csv.QuoteMode',
'org.apache.kafka.clients.producer.KafkaProducer',
'org.apache.kafka.clients.producer.Producer',
'org.apache.kafka.clients.producer.ProducerRecord',
'org.codehaus.stax2.XMLStreamWriter2',
'org.osgi.framework.AdaptPermission',
'org.osgi.framework.AdminPermission',
'org.osgi.framework.Bundle',
'org.osgi.framework.BundleActivator',
'org.osgi.framework.BundleContext',
'org.osgi.framework.BundleEvent',
'org.osgi.framework.BundleReference',
'org.osgi.framework.FrameworkUtil',
'org.osgi.framework.SynchronousBundleListener',
'org.osgi.framework.wiring.BundleWire',
'org.osgi.framework.wiring.BundleWiring',
'org.zeromq.ZMQ$Context',
'org.zeromq.ZMQ$Socket',
'org.zeromq.ZMQ',
// from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j)
'org.noggit.JSONParser',
]

View File

@ -1,37 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.log4j;
import org.apache.log4j.helpers.ThreadLocalMap;
/**
* Log4j 1.2 MDC breaks because it parses java.version incorrectly (does not handle new java9 versioning).
*
* This hack fixes up the pkg private members as if it had detected the java version correctly.
*/
public class Java9Hack {
public static void fixLog4j() {
if (MDC.mdc.tlm == null) {
MDC.mdc.java1 = false;
MDC.mdc.tlm = new ThreadLocalMap();
}
}
}

View File

@ -1,23 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Hack to fix Log4j 1.2 in Java 9.
*/
package org.apache.log4j;
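Both deleted files above existed only to patch Log4j 1.2's MDC, which mis-parses the java.version property under the Java 9 scheme; with Log4j 1.2 gone, there is nothing left to patch. For readers wondering where MDC-style context lives after this migration: Log4j 2's counterpart is ThreadContext. That API is not touched by this commit; the sketch below, with an illustrative key and value, is only for orientation:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.ThreadContext;

public class MdcReplacement {
    private static final Logger logger = LogManager.getLogger(MdcReplacement.class);

    public static void main(String[] args) {
        ThreadContext.put("requestId", "42"); // illustrative key/value
        try {
            logger.info("handled request"); // a layout can render %X{requestId}
        } finally {
            ThreadContext.remove("requestId");
        }
    }
}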

View File

@ -19,6 +19,7 @@
package org.elasticsearch;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.support.replication.ReplicationOperation;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.common.io.stream.StreamInput;

View File

@ -19,12 +19,12 @@
package org.elasticsearch;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.index.Index;
import org.elasticsearch.rest.RestStatus;
@ -39,7 +39,7 @@ import java.util.Set;
public final class ExceptionsHelper {
private static final ESLogger logger = Loggers.getLogger(ExceptionsHelper.class);
private static final Logger logger = Loggers.getLogger(ExceptionsHelper.class);
public static RuntimeException convertToRuntime(Exception e) {
if (e instanceof RuntimeException) {

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.cluster.health;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.IndicesOptions;
@ -105,7 +106,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
@Override
public void onFailure(String source, Exception e) {
logger.error("unexpected failure during [{}]", e, source);
logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e);
listener.onFailure(e);
}

View File

@ -19,6 +19,8 @@
package org.elasticsearch.action.admin.cluster.reroute;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@ -33,7 +35,6 @@ import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -77,12 +78,12 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu
private final ClusterRerouteRequest request;
private final ActionListener<ClusterRerouteResponse> listener;
private final ESLogger logger;
private final Logger logger;
private final AllocationService allocationService;
private volatile ClusterState clusterStateToSend;
private volatile RoutingExplanations explanations;
ClusterRerouteResponseAckedClusterStateUpdateTask(ESLogger logger, AllocationService allocationService, ClusterRerouteRequest request,
ClusterRerouteResponseAckedClusterStateUpdateTask(Logger logger, AllocationService allocationService, ClusterRerouteRequest request,
ActionListener<ClusterRerouteResponse> listener) {
super(Priority.IMMEDIATE, request, listener);
this.request = request;
@ -103,7 +104,7 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu
@Override
public void onFailure(String source, Exception e) {
logger.debug("failed to perform [{}]", e, source);
logger.debug(new ParameterizedMessage("failed to perform [{}]", source), e);
super.onFailure(source, e);
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.cluster.settings;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
@ -148,7 +149,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
@Override
public void onFailure(String source, Exception e) {
//if the reroute fails we only log
logger.debug("failed to perform [{}]", e, source);
logger.debug(new ParameterizedMessage("failed to perform [{}]", source), e);
listener.onFailure(new ElasticsearchException("reroute after update settings failed", e));
}
@ -166,7 +167,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
@Override
public void onFailure(String source, Exception e) {
logger.debug("failed to perform [{}]", e, source);
logger.debug(new ParameterizedMessage("failed to perform [{}]", source), e);
super.onFailure(source, e);
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.indices.close;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DestructiveOperations;
@ -108,7 +109,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
@Override
public void onFailure(Exception t) {
logger.debug("failed to close indices [{}]", t, (Object)concreteIndices);
logger.debug(new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices), t);
listener.onFailure(t);
}
});
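Note the (Object) cast carried over from the old call: concreteIndices is an array, and without the cast it could be spread across varargs placeholders; casting to Object binds the whole array to the single [{}] placeholder. An illustrative sketch of the same call shape:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;

public class VarargsCastNote {
    private static final Logger logger = LogManager.getLogger(VarargsCastNote.class);

    void onFailure(String[] concreteIndices, Exception t) {
        // The cast selects the single-Object ParameterizedMessage constructor,
        // so the array is rendered into one placeholder rather than expanded.
        logger.debug(new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices), t);
    }
}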

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.indices.delete;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DestructiveOperations;
@ -100,7 +101,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete
@Override
public void onFailure(Exception t) {
logger.debug("failed to delete indices [{}]", t, concreteIndices);
logger.debug(new ParameterizedMessage("failed to delete indices [{}]", concreteIndices), t);
listener.onFailure(t);
}
});

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.indices.mapping.put;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@ -92,12 +93,12 @@ public class TransportPutMappingAction extends TransportMasterNodeAction<PutMapp
@Override
public void onFailure(Exception t) {
logger.debug("failed to put mappings on indices [{}], type [{}]", t, concreteIndices, request.type());
logger.debug(new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", concreteIndices, request.type()), t);
listener.onFailure(t);
}
});
} catch (IndexNotFoundException ex) {
logger.debug("failed to put mappings on indices [{}], type [{}]", ex, request.indices(), request.type());
logger.debug(new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", request.indices(), request.type()), ex);
throw ex;
}
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.indices.open;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DestructiveOperations;
@ -93,7 +94,7 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction<OpenInde
@Override
public void onFailure(Exception t) {
logger.debug("failed to open indices [{}]", t, (Object)concreteIndices);
logger.debug(new ParameterizedMessage("failed to open indices [{}]", (Object) concreteIndices), t);
listener.onFailure(t);
}
});

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.indices.settings.put;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@ -92,7 +93,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd
@Override
public void onFailure(Exception t) {
logger.debug("failed to update settings on indices [{}]", t, (Object)concreteIndices);
logger.debug(new ParameterizedMessage("failed to update settings on indices [{}]", (Object) concreteIndices), t);
listener.onFailure(t);
}
});

View File

@ -18,6 +18,7 @@
*/
package org.elasticsearch.action.admin.indices.shards;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
@ -41,7 +42,6 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.gateway.AsyncShardFetch;
@ -150,7 +150,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
private class InternalAsyncFetch extends AsyncShardFetch<NodeGatewayStartedShards> {
InternalAsyncFetch(ESLogger logger, String type, ShardId shardId, TransportNodesListGatewayStartedShards action) {
InternalAsyncFetch(Logger logger, String type, ShardId shardId, TransportNodesListGatewayStartedShards action) {
super(logger, type, shardId, action);
}

View File

@ -18,6 +18,7 @@
*/
package org.elasticsearch.action.admin.indices.template.delete;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@ -73,7 +74,7 @@ public class TransportDeleteIndexTemplateAction extends TransportMasterNodeActio
@Override
public void onFailure(Exception e) {
logger.debug("failed to delete templates [{}]", e, request.name());
logger.debug(new ParameterizedMessage("failed to delete templates [{}]", request.name()), e);
listener.onFailure(e);
}
});

View File

@ -18,6 +18,7 @@
*/
package org.elasticsearch.action.admin.indices.template.put;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@ -94,7 +95,7 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction<P
@Override
public void onFailure(Exception e) {
logger.debug("failed to put template [{}]", e, request.name());
logger.debug(new ParameterizedMessage("failed to put template [{}]", request.name()), e);
listener.onFailure(e);
}
});

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.indices.upgrade.post;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@ -79,7 +80,7 @@ public class TransportUpgradeSettingsAction extends TransportMasterNodeAction<Up
@Override
public void onFailure(Exception t) {
logger.debug("failed to upgrade minimum compatibility version settings on indices [{}]", t, request.versions().keySet());
logger.debug(new ParameterizedMessage("failed to upgrade minimum compatibility version settings on indices [{}]", request.versions().keySet()), t);
listener.onFailure(t);
}
});

View File

@ -18,9 +18,10 @@
*/
package org.elasticsearch.action.bulk;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
@ -31,7 +32,7 @@ import java.util.concurrent.TimeUnit;
* Abstracts the low-level details of bulk request handling
*/
abstract class BulkRequestHandler {
protected final ESLogger logger;
protected final Logger logger;
protected final Client client;
protected BulkRequestHandler(Client client) {
@ -76,12 +77,12 @@ abstract class BulkRequestHandler {
listener.afterBulk(executionId, bulkRequest, bulkResponse);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
logger.info("Bulk request {} has been cancelled.", e, executionId);
logger.info(new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e);
if (!afterCalled) {
listener.afterBulk(executionId, bulkRequest, e);
}
} catch (Exception e) {
logger.warn("Failed to execute bulk request {}.", e, executionId);
logger.warn(new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e);
if (!afterCalled) {
listener.afterBulk(executionId, bulkRequest, e);
}
@ -142,10 +143,10 @@ abstract class BulkRequestHandler {
bulkRequestSetupSuccessful = true;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
logger.info("Bulk request {} has been cancelled.", e, executionId);
logger.info(new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e);
listener.afterBulk(executionId, bulkRequest, e);
} catch (Exception e) {
logger.warn("Failed to execute bulk request {}.", e, executionId);
logger.warn(new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e);
listener.afterBulk(executionId, bulkRequest, e);
} finally {
if (!bulkRequestSetupSuccessful && acquired) { // if we fail on client.bulk() release the semaphore

View File

@ -18,12 +18,12 @@
*/
package org.elasticsearch.action.bulk;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.FutureUtils;
@ -89,7 +89,7 @@ public class Retry {
}
static class AbstractRetryHandler implements ActionListener<BulkResponse> {
private final ESLogger logger;
private final Logger logger;
private final Client client;
private final ActionListener<BulkResponse> listener;
private final Iterator<TimeValue> backoff;

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.bulk;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionRequest;
@ -30,8 +31,8 @@ import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.index.TransportIndexAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.replication.TransportWriteAction;
import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo;
import org.elasticsearch.action.support.replication.TransportWriteAction;
import org.elasticsearch.action.update.UpdateHelper;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
@ -183,9 +184,9 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
private <ReplicationRequestT extends ReplicationRequest<ReplicationRequestT>> void logFailure(Throwable t, String operation, ShardId shardId, ReplicationRequest<ReplicationRequestT> request) {
if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) {
logger.trace("{} failed to execute bulk item ({}) {}", t, shardId, operation, request);
logger.trace(new ParameterizedMessage("{} failed to execute bulk item ({}) {}", shardId, operation, request), t);
} else {
logger.debug("{} failed to execute bulk item ({}) {}", t, shardId, operation, request);
logger.debug(new ParameterizedMessage("{} failed to execute bulk item ({}) {}", shardId, operation, request), t);
}
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.get;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportActions;
@ -92,7 +93,7 @@ public class TransportShardMultiGetAction extends TransportSingleShardAction<Mul
if (TransportActions.isShardNotAvailableException(e)) {
throw (ElasticsearchException) e;
} else {
logger.debug("{} failed to execute multi_get for [{}]/[{}]", e, shardId, item.type(), item.id());
logger.debug(new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId, item.type(), item.id()), e);
response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), item.type(), item.id(), e));
}
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.ingest;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
@ -90,7 +91,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
void processIndexRequest(Task task, String action, ActionListener listener, ActionFilterChain chain, IndexRequest indexRequest) {
executionService.executeIndexRequest(indexRequest, t -> {
logger.error("failed to execute pipeline [{}]", t, indexRequest.getPipeline());
logger.error(new ParameterizedMessage("failed to execute pipeline [{}]", indexRequest.getPipeline()), t);
listener.onFailure(t);
}, success -> {
// TransportIndexAction uses IndexRequest and same action name on the node that receives the request and the node that
@ -105,7 +106,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
long ingestStartTimeInNanos = System.nanoTime();
BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original);
executionService.executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> {
logger.debug("failed to execute pipeline [{}] for document [{}/{}/{}]", exception, indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id());
logger.debug(new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]", indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()), exception);
bulkRequestModifier.markCurrentItemAsFailed(exception);
}, (exception) -> {
if (exception != null) {

View File

@ -20,6 +20,8 @@
package org.elasticsearch.action.search;
import com.carrotsearch.hppc.IntArrayList;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.action.ActionListener;
@ -35,7 +37,6 @@ import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
@ -58,7 +59,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalSear
abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult> extends AbstractAsyncAction {
protected final ESLogger logger;
protected final Logger logger;
protected final SearchTransportService searchTransportService;
private final IndexNameExpressionResolver indexNameExpressionResolver;
protected final SearchPhaseController searchPhaseController;
@ -77,7 +78,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
private final Object shardFailuresMutex = new Object();
protected volatile ScoreDoc[] sortedShardDocs;
protected AbstractSearchAsyncAction(ESLogger logger, SearchTransportService searchTransportService, ClusterService clusterService,
protected AbstractSearchAsyncAction(Logger logger, SearchTransportService searchTransportService, ClusterService clusterService,
IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchRequest request,
ActionListener<SearchResponse> listener) {
@ -191,7 +192,9 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
innerMoveToSecondPhase();
} catch (Exception e) {
if (logger.isDebugEnabled()) {
logger.debug("{}: Failed to execute [{}] while moving to second phase", e, shardIt.shardId(), request);
logger.debug(
new ParameterizedMessage("{}: Failed to execute [{}] while moving to second phase", shardIt.shardId(), request),
e);
}
raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
}
@ -211,15 +214,21 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
if (totalOps.incrementAndGet() == expectedTotalOps) {
if (logger.isDebugEnabled()) {
if (e != null && !TransportActions.isShardNotAvailableException(e)) {
logger.debug("{}: Failed to execute [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(), request);
logger.debug(
new ParameterizedMessage(
"{}: Failed to execute [{}]",
shard != null ? shard.shortSummary() :
shardIt.shardId(),
request),
e);
} else if (logger.isTraceEnabled()) {
logger.trace("{}: Failed to execute [{}]", e, shard, request);
logger.trace(new ParameterizedMessage("{}: Failed to execute [{}]", shard, request), e);
}
}
final ShardSearchFailure[] shardSearchFailures = buildShardFailures();
if (successfulOps.get() == 0) {
if (logger.isDebugEnabled()) {
logger.debug("All shards failed for phase: [{}]", e, firstPhaseName());
logger.debug(new ParameterizedMessage("All shards failed for phase: [{}]", firstPhaseName()), e);
}
// no successful ops, raise an exception
@ -236,10 +245,13 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
final ShardRouting nextShard = shardIt.nextOrNull();
final boolean lastShard = nextShard == null;
// trace log this exception
if (logger.isTraceEnabled()) {
logger.trace("{}: Failed to execute [{}] lastShard [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(),
request, lastShard);
}
logger.trace(
() -> new ParameterizedMessage(
"{}: Failed to execute [{}] lastShard [{}]",
shard != null ? shard.shortSummary() : shardIt.shardId(),
request,
lastShard),
e);
if (!lastShard) {
try {
performFirstPhase(shardIndex, shardIt, nextShard);
@ -251,8 +263,14 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
// no more shards active, add a failure
if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
if (e != null && !TransportActions.isShardNotAvailableException(e)) {
logger.debug("{}: Failed to execute [{}] lastShard [{}]", e,
shard != null ? shard.shortSummary() : shardIt.shardId(), request, lastShard);
logger.debug(
new ParameterizedMessage(
"{}: Failed to execute [{}] lastShard [{}]",
shard != null ? shard.shortSummary() :
shardIt.shardId(),
request,
lastShard),
e);
}
}
}
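One variant in the hunk above deserves a call-out: the trace call sheds its logger.isTraceEnabled() guard by switching to the Supplier overload, which constructs the ParameterizedMessage only if TRACE is actually enabled. A minimal sketch with illustrative names:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;

public class LazyTrace {
    private static final Logger logger = LogManager.getLogger(LazyTrace.class);

    void onShardFailure(Exception e, String shardSummary, Object request, boolean lastShard) {
        // The lambda is evaluated lazily, so no explicit level check is needed.
        logger.trace(
            () -> new ParameterizedMessage(
                "{}: Failed to execute [{}] lastShard [{}]", shardSummary, request, lastShard),
            e);
    }
}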

View File

@ -19,12 +19,13 @@
package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
@ -43,7 +44,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea
private final AtomicArray<QueryFetchSearchResult> queryFetchResults;
SearchDfsQueryAndFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
SearchDfsQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {
@ -105,7 +106,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea
void onSecondPhaseFailure(Exception e, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult,
AtomicInteger counter) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute query phase", e, querySearchRequest.id());
logger.debug(new ParameterizedMessage("[{}] Failed to execute query phase", querySearchRequest.id()), e);
}
this.addShardFailure(shardIndex, dfsResult.shardTarget(), e);
successfulOps.decrementAndGet();

View File

@ -20,13 +20,14 @@
package org.elasticsearch.action.search;
import com.carrotsearch.hppc.IntArrayList;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchTransportService;
@ -50,7 +51,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
final AtomicArray<FetchSearchResult> fetchResults;
final AtomicArray<IntArrayList> docIdsToLoad;
SearchDfsQueryThenFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
SearchDfsQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {
@ -113,7 +114,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
void onQueryFailure(Exception e, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult,
AtomicInteger counter) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute query phase", e, querySearchRequest.id());
logger.debug(new ParameterizedMessage("[{}] Failed to execute query phase", querySearchRequest.id()), e);
}
this.addShardFailure(shardIndex, dfsResult.shardTarget(), e);
successfulOps.decrementAndGet();
@ -182,7 +183,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
void onFetchFailure(Exception e, ShardFetchSearchRequest fetchSearchRequest, int shardIndex,
SearchShardTarget shardTarget, AtomicInteger counter) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute fetch phase", e, fetchSearchRequest.id());
logger.debug(new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e);
}
this.addShardFailure(shardIndex, shardTarget, e);
successfulOps.decrementAndGet();

View File

@ -19,12 +19,12 @@
package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
@ -36,7 +36,7 @@ import java.io.IOException;
class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetchSearchResult> {
SearchQueryAndFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
SearchQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {

View File

@ -20,13 +20,14 @@
package org.elasticsearch.action.search;
import com.carrotsearch.hppc.IntArrayList;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchTransportService;
@ -46,7 +47,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
final AtomicArray<FetchSearchResult> fetchResults;
final AtomicArray<IntArrayList> docIdsToLoad;
SearchQueryThenFetchAsyncAction(ESLogger logger, SearchTransportService searchService,
SearchQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchService,
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
SearchPhaseController searchPhaseController, ThreadPool threadPool,
SearchRequest request, ActionListener<SearchResponse> listener) {
@ -115,7 +116,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
void onFetchFailure(Exception e, ShardFetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget,
AtomicInteger counter) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute fetch phase", e, fetchSearchRequest.id());
logger.debug(new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e);
}
this.addShardFailure(shardIndex, shardTarget, e);
successfulOps.decrementAndGet();

View File

@ -19,12 +19,13 @@
package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
@ -40,7 +41,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalScro
class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
private final ESLogger logger;
private final Logger logger;
private final SearchPhaseController searchPhaseController;
private final SearchTransportService searchTransportService;
private final SearchScrollRequest request;
@ -52,7 +53,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
private final AtomicInteger successfulOps;
private final AtomicInteger counter;
SearchScrollQueryAndFetchAsyncAction(ESLogger logger, ClusterService clusterService,
SearchScrollQueryAndFetchAsyncAction(Logger logger, ClusterService clusterService,
SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
this.logger = logger;
@ -146,7 +147,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
private void onPhaseFailure(Exception e, long searchId, int shardIndex) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute query phase", e, searchId);
logger.debug(new ParameterizedMessage("[{}] Failed to execute query phase", searchId), e);
}
addShardFailure(shardIndex, new ShardSearchFailure(e));
successfulOps.decrementAndGet();

View File

@ -20,12 +20,13 @@
package org.elasticsearch.action.search;
import com.carrotsearch.hppc.IntArrayList;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
@ -43,7 +44,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalScro
class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
private final ESLogger logger;
private final Logger logger;
private final SearchTransportService searchTransportService;
private final SearchPhaseController searchPhaseController;
private final SearchScrollRequest request;
@ -56,7 +57,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
private volatile ScoreDoc[] sortedShardDocs;
private final AtomicInteger successfulOps;
SearchScrollQueryThenFetchAsyncAction(ESLogger logger, ClusterService clusterService,
SearchScrollQueryThenFetchAsyncAction(Logger logger, ClusterService clusterService,
SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
this.logger = logger;
@ -146,7 +147,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Exception failure) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute query phase", failure, searchId);
logger.debug(new ParameterizedMessage("[{}] Failed to execute query phase", searchId), failure);
}
addShardFailure(shardIndex, new ShardSearchFailure(failure));
successfulOps.decrementAndGet();

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.search;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
@ -144,7 +145,7 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
}
void onFailedFreedContext(Throwable e, DiscoveryNode node) {
logger.warn("Clear SC failed on node[{}]", e, node);
logger.warn(new ParameterizedMessage("Clear SC failed on node[{}]", node), e);
if (expectedOps.countDown()) {
listener.onResponse(new ClearScrollResponse(false, numberOfFreedSearchContexts.get()));
} else {

View File

@ -19,9 +19,9 @@
package org.elasticsearch.action.support;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.threadpool.ThreadPool;
@ -33,7 +33,7 @@ import java.util.List;
*/
public abstract class AbstractListenableActionFuture<T, L> extends AdapterActionFuture<T, L> implements ListenableActionFuture<T> {
private static final ESLogger logger = Loggers.getLogger(AbstractListenableActionFuture.class);
private static final Logger logger = Loggers.getLogger(AbstractListenableActionFuture.class);
final ThreadPool threadPool;
volatile Object listeners;

View File

@ -18,6 +18,7 @@
*/
package org.elasticsearch.action.support;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
@ -75,8 +76,9 @@ public abstract class HandledTransportAction<Request extends ActionRequest<Reque
try {
channel.sendResponse(e);
} catch (Exception e1) {
logger.warn("Failed to send error response for action [{}] and request [{}]", e1,
actionName, request);
logger.warn(
new ParameterizedMessage("Failed to send error response for action [{}] and request [{}]", actionName, request),
e1);
}
}
});

View File

@ -19,10 +19,10 @@
package org.elasticsearch.action.support;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.threadpool.ThreadPool;
@ -39,12 +39,12 @@ public final class ThreadedActionListener<Response> implements ActionListener<Re
*/
public static class Wrapper {
private final ESLogger logger;
private final Logger logger;
private final ThreadPool threadPool;
private final boolean threadedListener;
public Wrapper(ESLogger logger, Settings settings, ThreadPool threadPool) {
public Wrapper(Logger logger, Settings settings, ThreadPool threadPool) {
this.logger = logger;
this.threadPool = threadPool;
// Should the action listener be threaded or not by default. Action listeners are automatically threaded for
@ -68,13 +68,13 @@ public final class ThreadedActionListener<Response> implements ActionListener<Re
}
}
private final ESLogger logger;
private final Logger logger;
private final ThreadPool threadPool;
private final String executor;
private final ActionListener<Response> listener;
private final boolean forceExecution;
public ThreadedActionListener(ESLogger logger, ThreadPool threadPool, String executor, ActionListener<Response> listener,
public ThreadedActionListener(Logger logger, ThreadPool threadPool, String executor, ActionListener<Response> listener,
boolean forceExecution) {
this.logger = logger;
this.threadPool = threadPool;

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
@ -27,7 +28,6 @@ import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskListener;
@ -165,9 +165,9 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
private final TransportAction<Request, Response> action;
private final AtomicInteger index = new AtomicInteger();
private final ESLogger logger;
private final Logger logger;
private RequestFilterChain(TransportAction<Request, Response> action, ESLogger logger) {
private RequestFilterChain(TransportAction<Request, Response> action, Logger logger) {
this.action = action;
this.logger = logger;
}
@ -201,9 +201,9 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
private final ActionFilter[] filters;
private final AtomicInteger index;
private final ESLogger logger;
private final Logger logger;
private ResponseFilterChain(ActionFilter[] filters, ESLogger logger) {
private ResponseFilterChain(ActionFilter[] filters, Logger logger) {
this.filters = filters;
this.index = new AtomicInteger(filters.length);
this.logger = logger;

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.broadcast;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.support.ActionFilters;
@ -37,10 +38,10 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.util.concurrent.atomic.AtomicInteger;
@ -224,7 +225,7 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
if (e != null) {
if (logger.isTraceEnabled()) {
if (!TransportActions.isShardNotAvailableException(e)) {
logger.trace("{}: failed to execute [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(), request);
logger.trace(new ParameterizedMessage("{}: failed to execute [{}]", shard != null ? shard.shortSummary() : shardIt.shardId(), request), e);
}
}
}
@ -233,7 +234,7 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
if (logger.isDebugEnabled()) {
if (e != null) {
if (!TransportActions.isShardNotAvailableException(e)) {
logger.debug("{}: failed to execute [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(), request);
logger.debug(new ParameterizedMessage("{}: failed to execute [{}]", shard != null ? shard.shortSummary() : shardIt.shardId(), request), e);
}
}
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.broadcast.node;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.IndicesRequest;
@ -46,13 +47,13 @@ import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.NodeShouldNotConnectException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
@ -363,7 +364,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
protected void onNodeFailure(DiscoveryNode node, int nodeIndex, Throwable t) {
String nodeId = node.getId();
if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
logger.debug("failed to execute [{}] on node [{}]", t, actionName, nodeId);
logger.debug(new ParameterizedMessage("failed to execute [{}] on node [{}]", actionName, nodeId), t);
}
// this is defensive to protect against the possibility of double invocation
@ -441,11 +442,11 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
shardResults[shardIndex] = failure;
if (TransportActions.isShardNotAvailableException(e)) {
if (logger.isTraceEnabled()) {
logger.trace("[{}] failed to execute operation for shard [{}]", e, actionName, shardRouting.shortSummary());
logger.trace(new ParameterizedMessage("[{}] failed to execute operation for shard [{}]", actionName, shardRouting.shortSummary()), e);
}
} else {
if (logger.isDebugEnabled()) {
logger.debug("[{}] failed to execute operation for shard [{}]", e, actionName, shardRouting.shortSummary());
logger.debug(new ParameterizedMessage("[{}] failed to execute operation for shard [{}]", actionName, shardRouting.shortSummary()), e);
}
}
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.master;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
import org.elasticsearch.action.ActionResponse;
@ -155,7 +156,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
public void onFailure(Exception t) {
if (t instanceof Discovery.FailedToCommitClusterStateException
|| (t instanceof NotMasterException)) {
logger.debug("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", t, actionName);
logger.debug(new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t);
retry(t, MasterNodeChangePredicate.INSTANCE);
} else {
listener.onFailure(t);
@ -209,7 +210,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
@Override
public void onTimeout(TimeValue timeout) {
logger.debug("timed out while retrying [{}] after failure (timeout [{}])", failure, actionName, timeout);
logger.debug(new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), failure);
listener.onFailure(new MasterNotDiscoveredException(failure));
}
}, changePredicate

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.nodes;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.NoSuchNodeException;
@ -31,13 +32,13 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.NodeShouldNotConnectException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
@ -238,7 +239,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
private void onFailure(int idx, String nodeId, Throwable t) {
if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
logger.debug("failed to execute on node [{}]", t, nodeId);
logger.debug(new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
}
if (accumulateExceptions()) {
responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));

View File

@ -18,6 +18,8 @@
*/
package org.elasticsearch.action.support.replication;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
@ -31,7 +33,6 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.ShardId;
@ -56,7 +57,7 @@ public class ReplicationOperation<
ReplicaRequest extends ReplicationRequest<ReplicaRequest>,
PrimaryResultT extends ReplicationOperation.PrimaryResult<ReplicaRequest>
> {
private final ESLogger logger;
private final Logger logger;
private final Request request;
private final Supplier<ClusterState> clusterStateSupplier;
private final String opType;
@ -86,7 +87,7 @@ public class ReplicationOperation<
public ReplicationOperation(Request request, Primary<Request, ReplicaRequest, PrimaryResultT> primary,
ActionListener<PrimaryResultT> listener,
boolean executeOnReplicas, Replicas<ReplicaRequest> replicas,
Supplier<ClusterState> clusterStateSupplier, ESLogger logger, String opType) {
Supplier<ClusterState> clusterStateSupplier, Logger logger, String opType) {
this.executeOnReplicas = executeOnReplicas;
this.replicasProxy = replicas;
this.primary = primary;
@ -189,8 +190,14 @@ public class ReplicationOperation<
@Override
public void onFailure(Exception replicaException) {
logger.trace("[{}] failure while performing [{}] on replica {}, request [{}]", replicaException, shard.shardId(), opType,
shard, replicaRequest);
logger.trace(
new ParameterizedMessage(
"[{}] failure while performing [{}] on replica {}, request [{}]",
shard.shardId(),
opType,
shard,
replicaRequest),
replicaException);
if (ignoreReplicaException(replicaException)) {
decPendingAndFinishIfNeeded();
} else {
@ -198,7 +205,7 @@ public class ReplicationOperation<
shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure(
shard.shardId(), shard.currentNodeId(), replicaException, restStatus, false));
String message = String.format(Locale.ROOT, "failed to perform %s on replica %s", opType, shard);
logger.warn("[{}] {}", replicaException, shard.shardId(), message);
logger.warn(new ParameterizedMessage("[{}] {}", shard.shardId(), message), replicaException);
replicasProxy.failShard(shard, replicaRequest.primaryTerm(), message, replicaException,
ReplicationOperation.this::decPendingAndFinishIfNeeded,
ReplicationOperation.this::onPrimaryDemoted,
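Beyond call sites, the type migration runs through field and constructor signatures: ReplicationOperation now receives an org.apache.logging.log4j.Logger where it previously took the ESLogger wrapper. A hedged sketch of that injection shape, using a hypothetical class:

    import org.apache.logging.log4j.Logger;

    // Callers supply the Log4j 2 Logger directly; the component never touches a
    // logger factory itself, which keeps it easy to drive with a captured logger.
    final class ReplicationSketch {
        private final Logger logger;
        private final String opType;

        ReplicationSketch(Logger logger, String opType) {
            this.logger = logger;
            this.opType = opType;
        }

        void onReplicaSuccess(String shardId) {
            logger.trace("[{}] finished [{}] on replica", shardId, opType);
        }
    }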

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.replication;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
@ -56,7 +57,6 @@ import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportChannelResponseHandler;
@ -65,6 +65,7 @@ import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponse.Empty;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
@ -215,7 +216,7 @@ public abstract class TransportReplicationAction<
channel.sendResponse(e);
} catch (Exception inner) {
inner.addSuppressed(e);
logger.warn("Failed to send response for {}", inner, actionName);
logger.warn(new ParameterizedMessage("Failed to send response for {}", actionName), inner);
}
}
});
@ -444,7 +445,9 @@ public abstract class TransportReplicationAction<
@Override
public void onFailure(Exception e) {
if (e instanceof RetryOnReplicaException) {
logger.trace("Retrying operation on replica, action [{}], request [{}]", e, transportReplicaAction, request);
logger.trace(
new ParameterizedMessage("Retrying operation on replica, action [{}], request [{}]", transportReplicaAction, request),
e);
final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
observer.waitForNextChange(new ClusterStateObserver.Listener() {
@Override
@ -479,7 +482,9 @@ public abstract class TransportReplicationAction<
channel.sendResponse(e);
} catch (IOException responseException) {
responseException.addSuppressed(e);
logger.warn("failed to send error message back to client for action [{}]", responseException, transportReplicaAction);
logger.warn(
new ParameterizedMessage("failed to send error message back to client for action [{}]", transportReplicaAction),
responseException);
}
}
@ -682,8 +687,12 @@ public abstract class TransportReplicationAction<
final Throwable cause = exp.unwrapCause();
if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException ||
(isPrimaryAction && retryPrimaryException(cause))) {
logger.trace("received an error from node [{}] for request [{}], scheduling a retry", exp, node.getId(),
request);
logger.trace(
new ParameterizedMessage(
"received an error from node [{}] for request [{}], scheduling a retry",
node.getId(),
request),
exp);
retry(exp);
} else {
finishAsFailed(exp);
@ -729,7 +738,7 @@ public abstract class TransportReplicationAction<
void finishAsFailed(Exception failure) {
if (finished.compareAndSet(false, true)) {
setPhase(task, "failed");
logger.trace("operation failed. action [{}], request [{}]", failure, actionName, request);
logger.trace(new ParameterizedMessage("operation failed. action [{}], request [{}]", actionName, request), failure);
listener.onFailure(failure);
} else {
assert false : "finishAsFailed called but operation is already finished";
@ -737,7 +746,9 @@ public abstract class TransportReplicationAction<
}
void finishWithUnexpectedFailure(Exception failure) {
logger.warn("unexpected error during the primary phase for action [{}], request [{}]", failure, actionName, request);
logger.warn(
new ParameterizedMessage("unexpected error during the primary phase for action [{}], request [{}]", actionName, request),
failure);
if (finished.compareAndSet(false, true)) {
setPhase(task, "failed");
listener.onFailure(failure);

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.replication;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.WriteRequest;
@ -27,7 +28,6 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
@ -241,13 +241,13 @@ public abstract class TransportWriteAction<
private final RespondingWriteResult respond;
private final IndexShard indexShard;
private final WriteRequest<?> request;
private final ESLogger logger;
private final Logger logger;
AsyncAfterWriteAction(final IndexShard indexShard,
final WriteRequest<?> request,
@Nullable final Translog.Location location,
final RespondingWriteResult respond,
final ESLogger logger) {
final Logger logger) {
this.indexShard = indexShard;
this.request = request;
boolean waitUntilRefresh = false;

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.single.shard;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.NoShardAvailableActionException;
@ -39,10 +40,10 @@ import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.util.function.Supplier;
@ -187,7 +188,7 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
private void onFailure(ShardRouting shardRouting, Exception e) {
if (logger.isTraceEnabled() && e != null) {
logger.trace("{}: failed to execute [{}]", e, shardRouting, internalRequest.request());
logger.trace(new ParameterizedMessage("{}: failed to execute [{}]", shardRouting, internalRequest.request()), e);
}
perform(e);
}
@ -205,7 +206,7 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
failure = new NoShardAvailableActionException(null, LoggerMessageFormat.format("No shard available for [{}]", internalRequest.request()), failure);
} else {
if (logger.isDebugEnabled()) {
logger.debug("{}: failed to execute [{}]", failure, null, internalRequest.request());
logger.debug(new ParameterizedMessage("{}: failed to execute [{}]", null, internalRequest.request()), failure);
}
}
listener.onFailure(failure);

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.support.tasks;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
@ -38,7 +39,6 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.NodeShouldNotConnectException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
@ -46,6 +46,7 @@ import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
@ -275,7 +276,7 @@ public abstract class TransportTasksAction<
private void onFailure(int idx, String nodeId, Throwable t) {
if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
logger.debug("failed to execute on node [{}]", t, nodeId);
logger.debug(new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
}
if (accumulateExceptions()) {
responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.termvectors;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportActions;
@ -87,7 +88,7 @@ public class TransportShardMultiTermsVectorAction extends TransportSingleShardAc
if (TransportActions.isShardNotAvailableException(t)) {
throw (ElasticsearchException) t;
} else {
logger.debug("{} failed to execute multi term vectors for [{}]/[{}]", t, shardId, termVectorsRequest.type(), termVectorsRequest.id());
logger.debug(new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), t);
response.add(request.locations.get(i),
new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(), termVectorsRequest.id(), t));
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.bootstrap;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.StringHelper;
@ -28,7 +29,6 @@ import org.elasticsearch.cli.Terminal;
import org.elasticsearch.common.PidFile;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
@ -81,7 +81,7 @@ final class Bootstrap {
/** initialize native resources */
public static void initializeNatives(Path tmpFile, boolean mlockAll, boolean seccomp, boolean ctrlHandler) {
final ESLogger logger = Loggers.getLogger(Bootstrap.class);
final Logger logger = Loggers.getLogger(Bootstrap.class);
// check if the user is running as root, and bail
if (Natives.definitelyRunningAsRoot()) {
@ -227,7 +227,7 @@ final class Bootstrap {
INSTANCE = new Bootstrap();
Environment environment = initialEnvironment(foreground, pidFile, esSettings);
LogConfigurator.configure(environment.settings(), true);
LogConfigurator.configure(environment);
checkForCustomConfFile();
if (environment.pidFile() != null) {
@ -264,7 +264,7 @@ final class Bootstrap {
if (foreground) {
Loggers.disableConsoleLogging();
}
ESLogger logger = Loggers.getLogger(Bootstrap.class);
Logger logger = Loggers.getLogger(Bootstrap.class);
if (INSTANCE.node != null) {
logger = Loggers.getLogger(Bootstrap.class, Node.NODE_NAME_SETTING.get(INSTANCE.node.settings()));
}
@ -310,7 +310,7 @@ final class Bootstrap {
private static void checkUnsetAndMaybeExit(String confFileSetting, String settingName) {
if (confFileSetting != null && confFileSetting.isEmpty() == false) {
ESLogger logger = Loggers.getLogger(Bootstrap.class);
Logger logger = Loggers.getLogger(Bootstrap.class);
logger.info("{} is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed.", settingName);
exit(1);
}

View File

@ -19,10 +19,11 @@
package org.elasticsearch.bootstrap;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
@ -100,7 +101,7 @@ final class BootstrapCheck {
final boolean enforceLimits,
final boolean ignoreSystemChecks,
final List<Check> checks,
final ESLogger logger) {
final Logger logger) {
final List<String> errors = new ArrayList<>();
final List<String> ignoredErrors = new ArrayList<>();
@ -136,7 +137,7 @@ final class BootstrapCheck {
}
static void log(final ESLogger logger, final String error) {
static void log(final Logger logger, final String error) {
logger.warn(error);
}
@ -417,7 +418,7 @@ final class BootstrapCheck {
}
// visible for testing
long getMaxMapCount(ESLogger logger) {
long getMaxMapCount(Logger logger) {
final Path path = getProcSysVmMaxMapCountPath();
try (final BufferedReader bufferedReader = getBufferedReader(path)) {
final String rawProcSysVmMaxMapCount = readProcSysVmMaxMapCount(bufferedReader);
@ -425,11 +426,11 @@ final class BootstrapCheck {
try {
return parseProcSysVmMaxMapCount(rawProcSysVmMaxMapCount);
} catch (final NumberFormatException e) {
logger.warn("unable to parse vm.max_map_count [{}]", e, rawProcSysVmMaxMapCount);
logger.warn(new ParameterizedMessage("unable to parse vm.max_map_count [{}]", rawProcSysVmMaxMapCount), e);
}
}
} catch (final IOException e) {
logger.warn("I/O exception while trying to read [{}]", e, path);
logger.warn(new ParameterizedMessage("I/O exception while trying to read [{}]", path), e);
}
return -1;
}
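The same injection idea appears at method granularity: getMaxMapCount takes the Logger as a parameter precisely so tests can pass their own and assert on the warnings. A compact sketch of that shape, with illustrative names:

    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;

    final class MaxMapCountSketch {
        // Production code passes its own Logger; a test can pass a Logger backed
        // by a capturing appender and verify the warning below was emitted.
        long parse(String rawValue, Logger logger) {
            try {
                return Long.parseLong(rawValue.trim());
            } catch (NumberFormatException e) {
                logger.warn(new ParameterizedMessage("unable to parse vm.max_map_count [{}]", rawValue), e);
                return -1;
            }
        }
    }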

View File

@ -116,4 +116,5 @@ class Elasticsearch extends SettingCommand {
static void close(String[] args) throws IOException {
Bootstrap.stop();
}
}

View File

@ -19,9 +19,10 @@
package org.elasticsearch.bootstrap;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.index.MergePolicy;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.io.IOError;
@ -76,14 +77,14 @@ class ElasticsearchUncaughtExceptionHandler implements Thread.UncaughtExceptionH
// visible for testing
void onFatalUncaught(final String threadName, final Throwable t) {
final ESLogger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
logger.error("fatal error in thread [{}], exiting", t, threadName);
final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
logger.error(new ParameterizedMessage("fatal error in thread [{}], exiting", threadName), t);
}
// visible for testing
void onNonFatalUncaught(final String threadName, final Throwable t) {
final ESLogger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
logger.warn("uncaught exception in thread [{}]", t, threadName);
final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
logger.warn(new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t);
}
// visible for testing

View File

@ -22,8 +22,8 @@ package org.elasticsearch.bootstrap;
import com.sun.jna.Native;
import com.sun.jna.NativeLong;
import com.sun.jna.Structure;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.util.Arrays;
@ -34,7 +34,7 @@ import java.util.List;
*/
final class JNACLibrary {
private static final ESLogger logger = Loggers.getLogger(JNACLibrary.class);
private static final Logger logger = Loggers.getLogger(JNACLibrary.class);
public static final int MCL_CURRENT = 1;
public static final int ENOMEM = 12;

View File

@ -25,8 +25,8 @@ import com.sun.jna.NativeLong;
import com.sun.jna.Pointer;
import com.sun.jna.Structure;
import com.sun.jna.win32.StdCallLibrary;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.util.ArrayList;
@ -40,7 +40,7 @@ import java.util.List;
*/
final class JNAKernel32Library {
private static final ESLogger logger = Loggers.getLogger(JNAKernel32Library.class);
private static final Logger logger = Loggers.getLogger(JNAKernel32Library.class);
// Callbacks must be kept around in order to be able to be called later,
// when the Windows ConsoleCtrlHandler sends an event.

View File

@ -21,8 +21,8 @@ package org.elasticsearch.bootstrap;
import com.sun.jna.Native;
import com.sun.jna.Pointer;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.monitor.jvm.JvmInfo;
@ -39,7 +39,7 @@ class JNANatives {
/** no instantiation */
private JNANatives() {}
private static final ESLogger logger = Loggers.getLogger(JNANatives.class);
private static final Logger logger = Loggers.getLogger(JNANatives.class);
// Set to true, in case native mlockall call was successful
static boolean LOCAL_MLOCKALL = false;
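For utility classes such as these JNA bridges, the change is purely the declared field type: the static logger keeps the same factory-call shape but is now the Log4j 2 interface. Sketched here with the stock LogManager factory (the diff itself goes through Elasticsearch's Loggers helper):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    final class NativeBridgeSketch {
        // Only the declared type changes relative to the Log4j 1 era; the rest
        // of the class keeps calling logger.warn(...)/logger.info(...) unchanged.
        private static final Logger logger = LogManager.getLogger(NativeBridgeSketch.class);

        private NativeBridgeSketch() {} // no instantiation, mirroring the utility-class style above
    }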

View File

@ -19,10 +19,10 @@
package org.elasticsearch.bootstrap;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.io.IOException;
@ -76,7 +76,7 @@ public class JarHell {
*/
public static void checkJarHell() throws Exception {
ClassLoader loader = JarHell.class.getClassLoader();
ESLogger logger = Loggers.getLogger(JarHell.class);
Logger logger = Loggers.getLogger(JarHell.class);
if (logger.isDebugEnabled()) {
logger.debug("java.class.path: {}", System.getProperty("java.class.path"));
logger.debug("sun.boot.class.path: {}", System.getProperty("sun.boot.class.path"));
@ -150,7 +150,7 @@ public class JarHell {
*/
@SuppressForbidden(reason = "needs JarFile for speed, just reading entries")
public static void checkJarHell(URL urls[]) throws Exception {
ESLogger logger = Loggers.getLogger(JarHell.class);
Logger logger = Loggers.getLogger(JarHell.class);
// we don't try to be sneaky and use deprecated/internal/not portable stuff
// like sun.boot.class.path, and with jigsaw we don't yet have a way to get
// a "list" at all. So just exclude any elements underneath the java home

View File

@ -19,7 +19,7 @@
package org.elasticsearch.bootstrap;
import org.elasticsearch.common.logging.ESLogger;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import java.nio.file.Path;
@ -32,7 +32,7 @@ final class Natives {
/** no instantiation */
private Natives() {}
private static final ESLogger logger = Loggers.getLogger(Natives.class);
private static final Logger logger = Loggers.getLogger(Natives.class);
// marker to determine if the JNA class files are available to the JVM
static final boolean JNA_AVAILABLE;

View File

@ -26,9 +26,9 @@ import com.sun.jna.NativeLong;
import com.sun.jna.Pointer;
import com.sun.jna.Structure;
import com.sun.jna.ptr.PointerByReference;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.io.IOException;
@ -92,7 +92,7 @@ import java.util.Map;
*/
// not an example of how to write code!!!
final class Seccomp {
private static final ESLogger logger = Loggers.getLogger(Seccomp.class);
private static final Logger logger = Loggers.getLogger(Seccomp.class);
// Linux implementation, based on seccomp(2) or prctl(2) with bpf filtering

View File

@ -20,7 +20,7 @@
package org.elasticsearch.client.transport;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
@ -32,9 +32,8 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
@ -43,11 +42,11 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.FutureTransportResponseHandler;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.Closeable;
@ -340,7 +339,7 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
transportService.connectToNode(node);
} catch (Exception e) {
it.remove();
logger.debug("failed to connect to discovered node [{}]", e, node);
logger.debug(new ParameterizedMessage("failed to connect to discovered node [{}]", node), e);
}
}
}
@ -377,7 +376,7 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
logger.trace("connecting to listed node (light) [{}]", listedNode);
transportService.connectToNodeLight(listedNode);
} catch (Exception e) {
logger.debug("failed to connect to node [{}], removed from nodes list", e, listedNode);
logger.debug(new ParameterizedMessage("failed to connect to node [{}], removed from nodes list", listedNode), e);
newFilteredNodes.add(listedNode);
continue;
}
@ -409,7 +408,7 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
newNodes.add(listedNode);
}
} catch (Exception e) {
logger.info("failed to get node info for {}, disconnecting...", e, listedNode);
logger.info(new ParameterizedMessage("failed to get node info for {}, disconnecting...", listedNode), e);
transportService.disconnectFromNode(listedNode);
}
}
@ -453,7 +452,7 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
transportService.connectToNodeLight(listedNode);
}
} catch (Exception e) {
logger.debug("failed to connect to node [{}], ignoring...", e, listedNode);
logger.debug(new ParameterizedMessage("failed to connect to node [{}], ignoring...", listedNode), e);
latch.countDown();
return;
}
@ -482,13 +481,18 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
@Override
public void handleException(TransportException e) {
logger.info("failed to get local cluster state for {}, disconnecting...", e, listedNode);
logger.info(
new ParameterizedMessage(
"failed to get local cluster state for {}, disconnecting...",
listedNode),
e);
transportService.disconnectFromNode(listedNode);
latch.countDown();
}
});
} catch (Exception e) {
logger.info("failed to get local cluster state info for {}, disconnecting...", e, listedNode);
logger.info(
new ParameterizedMessage("failed to get local cluster state info for {}, disconnecting...", listedNode), e);
transportService.disconnectFromNode(listedNode);
latch.countDown();
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.cluster;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
@ -44,17 +45,16 @@ import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceA
import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.RebalanceOnlyWhenActiveAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;

View File

@ -19,10 +19,10 @@
package org.elasticsearch.cluster;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
@ -35,7 +35,7 @@ import java.util.concurrent.atomic.AtomicReference;
*/
public class ClusterStateObserver {
protected final ESLogger logger;
protected final Logger logger;
public final ChangePredicate MATCH_ALL_CHANGES_PREDICATE = new EventPredicate() {
@ -58,7 +58,7 @@ public class ClusterStateObserver {
volatile boolean timedOut;
public ClusterStateObserver(ClusterService clusterService, ESLogger logger, ThreadContext contextHolder) {
public ClusterStateObserver(ClusterService clusterService, Logger logger, ThreadContext contextHolder) {
this(clusterService, new TimeValue(60000), logger, contextHolder);
}
@ -67,7 +67,7 @@ public class ClusterStateObserver {
* will fail any existing or new #waitForNextChange calls. Set to null
* to wait indefinitely
*/
public ClusterStateObserver(ClusterService clusterService, @Nullable TimeValue timeout, ESLogger logger, ThreadContext contextHolder) {
public ClusterStateObserver(ClusterService clusterService, @Nullable TimeValue timeout, Logger logger, ThreadContext contextHolder) {
this.clusterService = clusterService;
this.lastObservedState = new AtomicReference<>(new ObservedState(clusterService.state()));
this.timeOutValue = timeout;

View File

@ -19,6 +19,7 @@
package org.elasticsearch.cluster;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
@ -39,7 +40,6 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@ -379,7 +379,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
return clusterInfo;
}
static void buildShardLevelInfo(ESLogger logger, ShardStats[] stats, ImmutableOpenMap.Builder<String, Long> newShardSizes,
static void buildShardLevelInfo(Logger logger, ShardStats[] stats, ImmutableOpenMap.Builder<String, Long> newShardSizes,
ImmutableOpenMap.Builder<ShardRouting, String> newShardRoutingToDataPath, ClusterState state) {
MetaData meta = state.getMetaData();
for (ShardStats s : stats) {
@ -402,7 +402,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
}
}
static void fillDiskUsagePerNode(ESLogger logger, List<NodeStats> nodeStatsArray,
static void fillDiskUsagePerNode(Logger logger, List<NodeStats> nodeStatsArray,
ImmutableOpenMap.Builder<String, DiskUsage> newLeastAvaiableUsages,
ImmutableOpenMap.Builder<String, DiskUsage> newMostAvaiableUsages) {
for (NodeStats nodeStats : nodeStatsArray) {

View File

@ -18,6 +18,7 @@
*/
package org.elasticsearch.cluster;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
@ -91,7 +92,7 @@ public class NodeConnectionsService extends AbstractLifecycleComponent {
try {
transportService.disconnectFromNode(node);
} catch (Exception e) {
logger.warn("failed to disconnect to node [{}]", e, node);
logger.warn(new ParameterizedMessage("failed to disconnect to node [{}]", node), e);
}
}
}
@ -113,7 +114,7 @@ public class NodeConnectionsService extends AbstractLifecycleComponent {
nodeFailureCount = nodeFailureCount + 1;
// log every 6th failure
if ((nodeFailureCount % 6) == 1) {
logger.warn("failed to connect to node {} (tried [{}] times)", e, node, nodeFailureCount);
logger.warn(new ParameterizedMessage("failed to connect to node {} (tried [{}] times)", node, nodeFailureCount), e);
}
nodes.put(node, nodeFailureCount);
}

View File

@ -19,6 +19,8 @@
package org.elasticsearch.cluster.action.shard;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterChangedEvent;
@ -43,7 +45,6 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
@ -108,7 +109,7 @@ public class ShardStateAction extends AbstractComponent {
if (isMasterChannelException(exp)) {
waitForNewMasterAndRetry(actionName, observer, shardEntry, listener);
} else {
logger.warn("{} unexpected failure while sending request [{}] to [{}] for shard entry [{}]", exp, shardEntry.shardId, actionName, masterNode, shardEntry);
logger.warn(new ParameterizedMessage("{} unexpected failure while sending request [{}] to [{}] for shard entry [{}]", shardEntry.shardId, actionName, masterNode, shardEntry), exp);
listener.onFailure(exp instanceof RemoteTransportException ? (Exception) (exp.getCause() instanceof Exception ? exp.getCause() : new ElasticsearchException(exp.getCause())) : exp);
}
}
@ -169,7 +170,7 @@ public class ShardStateAction extends AbstractComponent {
@Override
public void onClusterServiceClose() {
logger.warn("{} node closed while execution action [{}] for shard entry [{}]", shardEntry.failure, shardEntry.shardId, actionName, shardEntry);
logger.warn(new ParameterizedMessage("{} node closed while execution action [{}] for shard entry [{}]", shardEntry.shardId, actionName, shardEntry), shardEntry.failure);
listener.onFailure(new NodeClosedException(clusterService.localNode()));
}
@ -184,9 +185,9 @@ public class ShardStateAction extends AbstractComponent {
private static class ShardFailedTransportHandler implements TransportRequestHandler<ShardEntry> {
private final ClusterService clusterService;
private final ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor;
private final ESLogger logger;
private final Logger logger;
public ShardFailedTransportHandler(ClusterService clusterService, ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor, ESLogger logger) {
public ShardFailedTransportHandler(ClusterService clusterService, ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor, Logger logger) {
this.clusterService = clusterService;
this.shardFailedClusterStateTaskExecutor = shardFailedClusterStateTaskExecutor;
this.logger = logger;
@ -194,7 +195,7 @@ public class ShardStateAction extends AbstractComponent {
@Override
public void messageReceived(ShardEntry request, TransportChannel channel) throws Exception {
logger.warn("{} received shard failed for {}", request.failure, request.shardId, request);
logger.warn(new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure);
clusterService.submitStateUpdateTask(
"shard-failed",
request,
@ -203,12 +204,12 @@ public class ShardStateAction extends AbstractComponent {
new ClusterStateTaskListener() {
@Override
public void onFailure(String source, Exception e) {
logger.error("{} unexpected failure while failing shard [{}]", e, request.shardId, request);
logger.error(new ParameterizedMessage("{} unexpected failure while failing shard [{}]", request.shardId, request), e);
try {
channel.sendResponse(e);
} catch (Exception channelException) {
channelException.addSuppressed(e);
logger.warn("{} failed to send failure [{}] while failing shard [{}]", channelException, request.shardId, e, request);
logger.warn(new ParameterizedMessage("{} failed to send failure [{}] while failing shard [{}]", request.shardId, e, request), channelException);
}
}
@ -218,7 +219,7 @@ public class ShardStateAction extends AbstractComponent {
try {
channel.sendResponse(new NotMasterException(source));
} catch (Exception channelException) {
logger.warn("{} failed to send no longer master while failing shard [{}]", channelException, request.shardId, request);
logger.warn(new ParameterizedMessage("{} failed to send no longer master while failing shard [{}]", request.shardId, request), channelException);
}
}
@ -227,7 +228,7 @@ public class ShardStateAction extends AbstractComponent {
try {
channel.sendResponse(TransportResponse.Empty.INSTANCE);
} catch (Exception channelException) {
logger.warn("{} failed to send response while failing shard [{}]", channelException, request.shardId, request);
logger.warn(new ParameterizedMessage("{} failed to send response while failing shard [{}]", request.shardId, request), channelException);
}
}
}
@ -238,9 +239,9 @@ public class ShardStateAction extends AbstractComponent {
public static class ShardFailedClusterStateTaskExecutor implements ClusterStateTaskExecutor<ShardEntry> {
private final AllocationService allocationService;
private final RoutingService routingService;
private final ESLogger logger;
private final Logger logger;
public ShardFailedClusterStateTaskExecutor(AllocationService allocationService, RoutingService routingService, ESLogger logger) {
public ShardFailedClusterStateTaskExecutor(AllocationService allocationService, RoutingService routingService, Logger logger) {
this.allocationService = allocationService;
this.routingService = routingService;
this.logger = logger;
@ -315,7 +316,7 @@ public class ShardStateAction extends AbstractComponent {
}
batchResultBuilder.successes(tasksToBeApplied);
} catch (Exception e) {
logger.warn("failed to apply failed shards {}", e, shardRoutingsToBeApplied);
logger.warn(new ParameterizedMessage("failed to apply failed shards {}", shardRoutingsToBeApplied), e);
// failures are communicated back to the requester
// cluster state will not be updated in this case
batchResultBuilder.failures(tasksToBeApplied, e);
@ -352,9 +353,9 @@ public class ShardStateAction extends AbstractComponent {
private static class ShardStartedTransportHandler implements TransportRequestHandler<ShardEntry> {
private final ClusterService clusterService;
private final ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor;
private final ESLogger logger;
private final Logger logger;
public ShardStartedTransportHandler(ClusterService clusterService, ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor, ESLogger logger) {
public ShardStartedTransportHandler(ClusterService clusterService, ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor, Logger logger) {
this.clusterService = clusterService;
this.shardStartedClusterStateTaskExecutor = shardStartedClusterStateTaskExecutor;
this.logger = logger;
@ -375,9 +376,9 @@ public class ShardStateAction extends AbstractComponent {
public static class ShardStartedClusterStateTaskExecutor implements ClusterStateTaskExecutor<ShardEntry>, ClusterStateTaskListener {
private final AllocationService allocationService;
private final ESLogger logger;
private final Logger logger;
public ShardStartedClusterStateTaskExecutor(AllocationService allocationService, ESLogger logger) {
public ShardStartedClusterStateTaskExecutor(AllocationService allocationService, Logger logger) {
this.allocationService = allocationService;
this.logger = logger;
}
@ -431,7 +432,7 @@ public class ShardStateAction extends AbstractComponent {
}
builder.successes(tasksToBeApplied);
} catch (Exception e) {
logger.warn("failed to apply started shards {}", e, shardRoutingsToBeApplied);
logger.warn(new ParameterizedMessage("failed to apply started shards {}", shardRoutingsToBeApplied), e);
builder.failures(tasksToBeApplied, e);
}
@ -440,7 +441,7 @@ public class ShardStateAction extends AbstractComponent {
@Override
public void onFailure(String source, Exception e) {
logger.error("unexpected failure during [{}]", e, source);
logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
}

View File

@ -22,6 +22,7 @@ package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.ObjectHashSet;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.Diffable;
@ -38,7 +39,6 @@ import org.elasticsearch.common.collect.HppcMaps;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@ -758,7 +758,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
/** As of 2.0 we require units for time and byte-sized settings. This methods adds default units to any cluster settings that don't
* specify a unit. */
public static MetaData addDefaultUnitsIfNeeded(ESLogger logger, MetaData metaData) {
public static MetaData addDefaultUnitsIfNeeded(Logger logger, MetaData metaData) {
Settings.Builder newPersistentSettings = null;
for(Map.Entry<String,String> ent : metaData.persistentSettings().getAsMap().entrySet()) {
String settingName = ent.getKey();

View File

@ -21,6 +21,7 @@ package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
@ -446,9 +447,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
@Override
public void onFailure(String source, Exception e) {
if (e instanceof IndexAlreadyExistsException) {
logger.trace("[{}] failed to create", e, request.index());
logger.trace(new ParameterizedMessage("[{}] failed to create", request.index()), e);
} else {
logger.debug("[{}] failed to create", e, request.index());
logger.debug(new ParameterizedMessage("[{}] failed to create", request.index()), e);
}
super.onFailure(source, e);
}

View File

@ -20,7 +20,6 @@
package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesClusterStateUpdateRequest;

View File

@ -20,6 +20,7 @@
package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;
import org.elasticsearch.cluster.AckedClusterStateTaskListener;
@ -193,7 +194,7 @@ public class MetaDataMappingService extends AbstractComponent {
}
}
} catch (Exception e) {
logger.warn("[{}] failed to refresh-mapping in cluster state", e, index);
logger.warn(new ParameterizedMessage("[{}] failed to refresh-mapping in cluster state", index), e);
}
return dirty;
}
@ -207,7 +208,7 @@ public class MetaDataMappingService extends AbstractComponent {
refreshTask,
ClusterStateTaskConfig.build(Priority.HIGH),
refreshExecutor,
(source, e) -> logger.warn("failure during [{}]", e, source)
(source, e) -> logger.warn(new ParameterizedMessage("failure during [{}]", source), e)
);
}

View File

@ -21,6 +21,7 @@ package org.elasticsearch.cluster.routing;
import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -30,7 +31,6 @@ import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
@ -452,7 +452,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
*
* @return the started shard
*/
public ShardRouting startShard(ESLogger logger, ShardRouting initializingShard, RoutingChangesObserver routingChangesObserver) {
public ShardRouting startShard(Logger logger, ShardRouting initializingShard, RoutingChangesObserver routingChangesObserver) {
ensureMutable();
ShardRouting startedShard = started(initializingShard);
logger.trace("{} marked shard as started (routing: {})", initializingShard.shardId(), initializingShard);
@ -484,7 +484,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
* - If shard is a (primary or replica) relocation target, this also clears the relocation information on the source shard.
*
*/
public void failShard(ESLogger logger, ShardRouting failedShard, UnassignedInfo unassignedInfo, IndexMetaData indexMetaData,
public void failShard(Logger logger, ShardRouting failedShard, UnassignedInfo unassignedInfo, IndexMetaData indexMetaData,
RoutingChangesObserver routingChangesObserver) {
ensureMutable();
assert failedShard.assignedToNode() : "only assigned shards can be failed";

View File

@ -19,6 +19,7 @@
package org.elasticsearch.cluster.routing;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
@ -113,16 +114,16 @@ public class RoutingService extends AbstractLifecycleComponent {
rerouting.set(false);
ClusterState state = clusterService.state();
if (logger.isTraceEnabled()) {
logger.error("unexpected failure during [{}], current state:\n{}", e, source, state.prettyPrint());
logger.error(new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state.prettyPrint()), e);
} else {
logger.error("unexpected failure during [{}], current state version [{}]", e, source, state.version());
logger.error(new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e);
}
}
});
} catch (Exception e) {
rerouting.set(false);
ClusterState state = clusterService.state();
logger.warn("failed to reroute routing table, current state:\n{}", e, state.prettyPrint());
logger.warn(new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state.prettyPrint()), e);
}
}
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.cluster.routing.allocation.allocator;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.IntroSorter;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -37,7 +38,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@ -209,7 +209,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
* A {@link Balancer}
*/
public static class Balancer {
private final ESLogger logger;
private final Logger logger;
private final Map<String, ModelNode> nodes = new HashMap<>();
private final RoutingAllocation allocation;
private final RoutingNodes routingNodes;
@ -219,7 +219,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
private final MetaData metaData;
private final float avgShardsPerNode;
public Balancer(ESLogger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) {
public Balancer(Logger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) {
this.logger = logger;
this.allocation = allocation;
this.weight = weight;

View File

@ -19,6 +19,8 @@
package org.elasticsearch.cluster.service;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.cluster.AckedClusterStateTaskListener;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
@ -43,7 +45,6 @@ import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
@ -554,9 +555,16 @@ public class ClusterService extends AbstractLifecycleComponent {
} catch (Exception e) {
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
if (logger.isTraceEnabled()) {
logger.trace("failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}", e, executionTime,
previousClusterState.version(), tasksSummary, previousClusterState.nodes().prettyPrint(),
previousClusterState.routingTable().prettyPrint(), previousClusterState.getRoutingNodes().prettyPrint());
logger.trace(
new ParameterizedMessage(
"failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}",
executionTime,
previousClusterState.version(),
tasksSummary,
previousClusterState.nodes().prettyPrint(),
previousClusterState.routingTable().prettyPrint(),
previousClusterState.getRoutingNodes().prettyPrint()),
e);
}
warnAboutSlowTaskIfNeeded(executionTime, tasksSummary);
batchResult = ClusterStateTaskExecutor.BatchResult.<T>builder()
@ -587,7 +595,7 @@ public class ClusterService extends AbstractLifecycleComponent {
executionResult.handle(
() -> proccessedListeners.add(updateTask),
ex -> {
logger.debug("cluster state update task {} failed", ex, updateTask.toString(executor));
logger.debug(new ParameterizedMessage("cluster state update task {} failed", updateTask.toString(executor)), ex);
updateTask.listener.onFailure(updateTask.source, ex);
}
);
@ -670,7 +678,12 @@ public class ClusterService extends AbstractLifecycleComponent {
try {
clusterStatePublisher.accept(clusterChangedEvent, ackListener);
} catch (Discovery.FailedToCommitClusterStateException t) {
logger.warn("failing [{}]: failed to commit cluster state version [{}]", t, tasksSummary, newClusterState.version());
logger.warn(
new ParameterizedMessage(
"failing [{}]: failed to commit cluster state version [{}]",
tasksSummary,
newClusterState.version()),
t);
proccessedListeners.forEach(task -> task.listener.onFailure(task.source, t));
return;
}
@ -713,7 +726,9 @@ public class ClusterService extends AbstractLifecycleComponent {
try {
ackListener.onNodeAck(newClusterState.nodes().getLocalNode(), null);
} catch (Exception e) {
logger.debug("error while processing ack for master node [{}]", e, newClusterState.nodes().getLocalNode());
logger.debug(
new ParameterizedMessage("error while processing ack for master node [{}]", newClusterState.nodes().getLocalNode()),
e);
}
}
@ -724,7 +739,11 @@ public class ClusterService extends AbstractLifecycleComponent {
try {
executor.clusterStatePublished(clusterChangedEvent);
} catch (Exception e) {
logger.error("exception thrown while notifying executor of new cluster state publication [{}]", e, tasksSummary);
logger.error(
new ParameterizedMessage(
"exception thrown while notifying executor of new cluster state publication [{}]",
tasksSummary),
e);
}
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
@ -733,8 +752,15 @@ public class ClusterService extends AbstractLifecycleComponent {
warnAboutSlowTaskIfNeeded(executionTime, tasksSummary);
} catch (Exception e) {
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
logger.warn("failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}", e, executionTime,
newClusterState.version(), newClusterState.stateUUID(), tasksSummary, newClusterState.prettyPrint());
logger.warn(
new ParameterizedMessage(
"failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}",
executionTime,
newClusterState.version(),
newClusterState.stateUUID(),
tasksSummary,
newClusterState.prettyPrint()),
e);
// TODO: do we want to call updateTask.onFailure here?
}
@ -743,7 +769,7 @@ public class ClusterService extends AbstractLifecycleComponent {
// this one is overridden in tests so we can control time
protected long currentTimeInNanos() {return System.nanoTime();}
private static SafeClusterStateTaskListener safe(ClusterStateTaskListener listener, ESLogger logger) {
private static SafeClusterStateTaskListener safe(ClusterStateTaskListener listener, Logger logger) {
if (listener instanceof AckedClusterStateTaskListener) {
return new SafeAckedClusterStateTaskListener((AckedClusterStateTaskListener) listener, logger);
} else {
@ -753,9 +779,9 @@ public class ClusterService extends AbstractLifecycleComponent {
private static class SafeClusterStateTaskListener implements ClusterStateTaskListener {
private final ClusterStateTaskListener listener;
private final ESLogger logger;
private final Logger logger;
public SafeClusterStateTaskListener(ClusterStateTaskListener listener, ESLogger logger) {
public SafeClusterStateTaskListener(ClusterStateTaskListener listener, Logger logger) {
this.listener = listener;
this.logger = logger;
}
@ -766,7 +792,7 @@ public class ClusterService extends AbstractLifecycleComponent {
listener.onFailure(source, e);
} catch (Exception inner) {
inner.addSuppressed(e);
logger.error("exception thrown by listener notifying of failure from [{}]", inner, source);
logger.error(new ParameterizedMessage("exception thrown by listener notifying of failure from [{}]", source), inner);
}
}
@ -775,7 +801,9 @@ public class ClusterService extends AbstractLifecycleComponent {
try {
listener.onNoLongerMaster(source);
} catch (Exception e) {
logger.error("exception thrown by listener while notifying no longer master from [{}]", e, source);
logger.error(
new ParameterizedMessage("exception thrown by listener while notifying no longer master from [{}]", source),
e);
}
}
@ -785,21 +813,22 @@ public class ClusterService extends AbstractLifecycleComponent {
listener.clusterStateProcessed(source, oldState, newState);
} catch (Exception e) {
logger.error(
new ParameterizedMessage(
"exception thrown by listener while notifying of cluster state processed from [{}], old cluster state:\n" +
"{}\nnew cluster state:\n{}",
e,
source,
oldState.prettyPrint(),
newState.prettyPrint());
newState.prettyPrint()),
e);
}
}
}
private static class SafeAckedClusterStateTaskListener extends SafeClusterStateTaskListener implements AckedClusterStateTaskListener {
private final AckedClusterStateTaskListener listener;
private final ESLogger logger;
private final Logger logger;
public SafeAckedClusterStateTaskListener(AckedClusterStateTaskListener listener, ESLogger logger) {
public SafeAckedClusterStateTaskListener(AckedClusterStateTaskListener listener, Logger logger) {
super(listener, logger);
this.listener = listener;
this.logger = logger;
@ -996,7 +1025,7 @@ public class ClusterService extends AbstractLifecycleComponent {
private static class AckCountDownListener implements Discovery.AckListener {
private static final ESLogger logger = Loggers.getLogger(AckCountDownListener.class);
private static final Logger logger = Loggers.getLogger(AckCountDownListener.class);
private final AckedClusterStateTaskListener ackedTaskListener;
private final CountDown countDown;
@ -1040,7 +1069,9 @@ public class ClusterService extends AbstractLifecycleComponent {
logger.trace("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion);
} else {
this.lastFailure = e;
logger.debug("ack received from node [{}], cluster_state update (version: {})", e, node, clusterStateVersion);
logger.debug(
new ParameterizedMessage("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion),
e);
}
if (countDown.countDown()) {

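To make the new idiom above concrete: the old ESLogger took the Throwable between the format string and its parameters, while Log4j 2 takes a fully built Message plus the Throwable. A minimal sketch (class and message are illustrative, not part of this commit):

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;

class Log4j2IdiomSketch {
    // Log4j 2 separates the message from the cause: build the ParameterizedMessage
    // with the pattern and its arguments, then pass the exception as the final argument.
    void warnOnFailure(Logger logger, String source, Exception e) {
        logger.warn(new ParameterizedMessage("failed to apply change from [{}]", source), e);
    }
}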
View File

@ -16,13 +16,14 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Annotation to suppress logging usage check errors inside a whole class or a method.
*/

View File

@ -19,7 +19,7 @@
package org.elasticsearch.common.breaker;
import org.elasticsearch.common.logging.ESLogger;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.indices.breaker.BreakerSettings;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
@ -36,7 +36,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker {
private final double overheadConstant;
private final AtomicLong used;
private final AtomicLong trippedCount;
private final ESLogger logger;
private final Logger logger;
private final HierarchyCircuitBreakerService parent;
private final String name;
@ -48,7 +48,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker {
* @param parent parent circuit breaker service to delegate tripped breakers to
* @param name the name of the breaker
*/
public ChildMemoryCircuitBreaker(BreakerSettings settings, ESLogger logger,
public ChildMemoryCircuitBreaker(BreakerSettings settings, Logger logger,
HierarchyCircuitBreakerService parent, String name) {
this(settings, null, logger, parent, name);
}
@ -64,7 +64,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker {
* @param oldBreaker the previous circuit breaker to inherit the used value from (starting offset)
*/
public ChildMemoryCircuitBreaker(BreakerSettings settings, ChildMemoryCircuitBreaker oldBreaker,
ESLogger logger, HierarchyCircuitBreakerService parent, String name) {
Logger logger, HierarchyCircuitBreakerService parent, String name) {
this.name = name;
this.settings = settings;
this.memoryBytesLimit = settings.getLimit();

View File

@ -18,7 +18,7 @@
*/
package org.elasticsearch.common.breaker;
import org.elasticsearch.common.logging.ESLogger;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.unit.ByteSizeValue;
import java.util.concurrent.atomic.AtomicLong;
@ -33,7 +33,7 @@ public class MemoryCircuitBreaker implements CircuitBreaker {
private final double overheadConstant;
private final AtomicLong used;
private final AtomicLong trippedCount;
private final ESLogger logger;
private final Logger logger;
/**
@ -43,7 +43,7 @@ public class MemoryCircuitBreaker implements CircuitBreaker {
* @param limit circuit breaker limit
* @param overheadConstant constant multiplier for byte estimations
*/
public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, ESLogger logger) {
public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, Logger logger) {
this(limit, overheadConstant, null, logger);
}
@ -56,7 +56,7 @@ public class MemoryCircuitBreaker implements CircuitBreaker {
* @param overheadConstant constant multiplier for byte estimations
* @param oldBreaker the previous circuit breaker to inherit the used value from (starting offset)
*/
public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, MemoryCircuitBreaker oldBreaker, ESLogger logger) {
public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, MemoryCircuitBreaker oldBreaker, Logger logger) {
this.memoryBytesLimit = limit.bytes();
this.overheadConstant = overheadConstant;
if (oldBreaker == null) {

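A hypothetical call site after this signature change, assuming the constructor shown above (the limit and overhead values are made up):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.breaker.MemoryCircuitBreaker;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

class BreakerWiringSketch {
    MemoryCircuitBreaker newBreaker() {
        // the injected logger is now the plain Log4j 2 interface rather than ESLogger
        Logger logger = LogManager.getLogger(MemoryCircuitBreaker.class);
        return new MemoryCircuitBreaker(new ByteSizeValue(512, ByteSizeUnit.MB), 1.0, logger);
    }
}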
View File

@ -19,19 +19,17 @@
package org.elasticsearch.common.component;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.Node;
/**
*
*/
public abstract class AbstractComponent {
protected final ESLogger logger;
protected final Logger logger;
protected final DeprecationLogger deprecationLogger;
protected final Settings settings;
@ -42,7 +40,7 @@ public abstract class AbstractComponent {
}
public AbstractComponent(Settings settings, Class customClass) {
this.logger = Loggers.getLogger(customClass, settings);
this.logger = LogManager.getLogger(customClass);
this.deprecationLogger = new DeprecationLogger(logger);
this.settings = settings;
}
@ -71,4 +69,5 @@ public abstract class AbstractComponent {
deprecationLogger.deprecated("Setting [{}] has been removed, use [{}] instead", settingName, alternativeName);
}
}
}
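The effect for subclasses, sketched with a hypothetical component (assumes the single-argument constructor that delegates to the one above):

import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;

class ExampleComponent extends AbstractComponent {
    ExampleComponent(Settings settings) {
        super(settings);
        // this.logger is now org.apache.logging.log4j.Logger, named after the concrete class
        logger.info("component [{}] initialized", getClass().getSimpleName());
    }
}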

View File

@ -19,20 +19,15 @@
package org.elasticsearch.common.geo.builders;
import org.locationtech.spatial4j.context.jts.JtsSpatialContext;
import org.locationtech.spatial4j.exception.InvalidShapeException;
import org.locationtech.spatial4j.shape.Shape;
import org.locationtech.spatial4j.shape.jts.JtsGeometry;
import com.vividsolutions.jts.geom.Coordinate;
import com.vividsolutions.jts.geom.Geometry;
import com.vividsolutions.jts.geom.GeometryFactory;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.unit.DistanceUnit.Distance;
import org.elasticsearch.common.xcontent.ToXContent;
@ -40,6 +35,10 @@ import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.GeoShapeFieldMapper;
import org.locationtech.spatial4j.context.jts.JtsSpatialContext;
import org.locationtech.spatial4j.exception.InvalidShapeException;
import org.locationtech.spatial4j.shape.Shape;
import org.locationtech.spatial4j.shape.jts.JtsGeometry;
import java.io.IOException;
import java.util.ArrayList;
@ -53,7 +52,7 @@ import java.util.Locale;
*/
public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWriteable {
protected static final ESLogger LOGGER = ESLoggerFactory.getLogger(ShapeBuilder.class.getName());
protected static final Logger LOGGER = ESLoggerFactory.getLogger(ShapeBuilder.class.getName());
private static final boolean DEBUG;
static {

View File

@ -16,6 +16,7 @@
package org.elasticsearch.common.inject.spi;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Binder;
import org.elasticsearch.common.inject.Binding;
@ -40,7 +41,6 @@ import org.elasticsearch.common.inject.internal.PrivateElementsImpl;
import org.elasticsearch.common.inject.internal.ProviderMethodsModule;
import org.elasticsearch.common.inject.internal.SourceProvider;
import org.elasticsearch.common.inject.matcher.Matcher;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.lang.annotation.Annotation;
@ -351,7 +351,7 @@ public final class Elements {
return builder;
}
private static ESLogger logger = Loggers.getLogger(Elements.class);
private static Logger logger = Loggers.getLogger(Elements.class);
protected Object getSource() {
Object ret;

View File

@ -19,8 +19,8 @@
package org.elasticsearch.common.io;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.logging.ESLogger;
import java.io.BufferedReader;
import java.io.IOException;
@ -101,7 +101,7 @@ public final class FileSystemUtils {
* Check that a directory exists, is a directory and is readable
* by the current user
*/
public static boolean isAccessibleDirectory(Path directory, ESLogger logger) {
public static boolean isAccessibleDirectory(Path directory, Logger logger) {
assert directory != null && logger != null;
if (!Files.exists(directory)) {

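A hypothetical call site after the signature change:

import org.apache.logging.log4j.LogManager;
import org.elasticsearch.common.io.FileSystemUtils;
import java.nio.file.Path;
import java.nio.file.Paths;

class DirectoryCheckSketch {
    boolean pluginsDirReadable() {
        Path dir = Paths.get("plugins"); // illustrative path
        // the logger parameter is now the Log4j 2 interface
        return FileSystemUtils.isAccessibleDirectory(dir, LogManager.getLogger(DirectoryCheckSketch.class));
    }
}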
View File

@ -1,260 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging;
import org.apache.log4j.Layout;
import org.apache.log4j.WriterAppender;
import org.apache.log4j.helpers.LogLog;
import org.elasticsearch.common.SuppressForbidden;
import java.io.IOException;
import java.io.OutputStream;
/**
* ConsoleAppender appends log events to <code>System.out</code> or
* <code>System.err</code> using a layout specified by the user. The
* default target is <code>System.out</code>.
* <p>Elasticsearch: adapter for Log4j that allows disabling console logging...</p>
*
* @author Ceki G&uuml;lc&uuml;
* @author Curt Arnold
* @since 1.1
*/
public class ConsoleAppender extends WriterAppender {
public static final String SYSTEM_OUT = "System.out";
public static final String SYSTEM_ERR = "System.err";
protected String target = SYSTEM_OUT;
/**
* Determines if the appender honors reassignments of System.out
* or System.err made after configuration.
*/
private boolean follow = true;
/**
* Constructs an unconfigured appender.
*/
public ConsoleAppender() {
}
/**
* Creates a configured appender.
*
* @param layout layout, may not be null.
*/
public ConsoleAppender(Layout layout) {
this(layout, SYSTEM_OUT);
}
/**
* Creates a configured appender.
*
* @param layout layout, may not be null.
* @param target target, either "System.err" or "System.out".
*/
public ConsoleAppender(Layout layout, String target) {
setLayout(layout);
setTarget(target);
activateOptions();
}
/**
* Sets the value of the <b>Target</b> option. Recognized values
* are "System.out" and "System.err". Any other value will be
* ignored.
*/
public void setTarget(String value) {
String v = value.trim();
if (SYSTEM_OUT.equalsIgnoreCase(v)) {
target = SYSTEM_OUT;
} else if (SYSTEM_ERR.equalsIgnoreCase(v)) {
target = SYSTEM_ERR;
} else {
targetWarn(value);
}
}
/**
* Returns the current value of the <b>Target</b> property. The
* default value of the option is "System.out".
* <p>
* See also {@link #setTarget}.
*/
public String getTarget() {
return target;
}
/**
* Sets whether the appender honors reassignments of System.out
* or System.err made after configuration.
*
* @param newValue if true, appender will use value of System.out or
* System.err in force at the time when logging events are appended.
* @since 1.2.13
*/
public final void setFollow(final boolean newValue) {
follow = newValue;
}
/**
* Gets whether the appender honors reassignments of System.out
* or System.err made after configuration.
*
* @return true if appender will use value of System.out or
* System.err in force at the time when logging events are appended.
* @since 1.2.13
*/
public final boolean getFollow() {
return follow;
}
void targetWarn(String val) {
LogLog.warn("[" + val + "] should be System.out or System.err.");
LogLog.warn("Using previously set target, System.out by default.");
}
/**
* Prepares the appender for use.
*/
@Override
@SuppressForbidden(reason = "System#out")
public void activateOptions() {
if (follow) {
if (target.equals(SYSTEM_ERR)) {
setWriter(createWriter(new SystemErrStream()));
} else {
setWriter(createWriter(new SystemOutStream()));
}
} else {
if (target.equals(SYSTEM_ERR)) {
setWriter(createWriter(System.err));
} else {
setWriter(createWriter(System.out));
}
}
super.activateOptions();
}
/**
* {@inheritDoc}
*/
@Override
protected final void closeWriter() {
if (follow) {
super.closeWriter();
}
}
/**
* An implementation of OutputStream that redirects to the
* current System.err.
*/
@SuppressForbidden(reason = "System#err")
private static class SystemErrStream extends OutputStream {
public SystemErrStream() {
}
@Override
public void close() {
}
@Override
public void flush() {
System.err.flush();
}
@Override
public void write(final byte[] b) throws IOException {
if (!Loggers.consoleLoggingEnabled()) {
return;
}
System.err.write(b);
}
@Override
public void write(final byte[] b, final int off, final int len)
throws IOException {
if (!Loggers.consoleLoggingEnabled()) {
return;
}
System.err.write(b, off, len);
}
@Override
public void write(final int b) throws IOException {
if (!Loggers.consoleLoggingEnabled()) {
return;
}
System.err.write(b);
}
}
/**
* An implementation of OutputStream that redirects to the
* current System.out.
*/
@SuppressForbidden(reason = "System#out")
private static class SystemOutStream extends OutputStream {
public SystemOutStream() {
}
@Override
public void close() {
}
@Override
public void flush() {
System.out.flush();
}
@Override
public void write(final byte[] b) throws IOException {
if (!Loggers.consoleLoggingEnabled()) {
return;
}
System.out.write(b);
}
@Override
public void write(final byte[] b, final int off, final int len)
throws IOException {
if (!Loggers.consoleLoggingEnabled()) {
return;
}
System.out.write(b, off, len);
}
@Override
public void write(final int b) throws IOException {
if (!Loggers.consoleLoggingEnabled()) {
return;
}
System.out.write(b);
}
}
}

View File

@ -19,6 +19,9 @@
package org.elasticsearch.common.logging;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.common.SuppressLoggerChecks;
import org.elasticsearch.common.util.concurrent.ThreadContext;
@ -31,6 +34,12 @@ import java.util.concurrent.CopyOnWriteArraySet;
*/
public class DeprecationLogger {
private final Logger logger;
public Logger getLogger() {
return logger;
}
/**
* The "Warning" Header comes from RFC-7234. As the RFC describes, it's generally used for caching purposes, but it can be
* used for <em>any</em> warning.
@ -84,22 +93,20 @@ public class DeprecationLogger {
}
}
private final ESLogger logger;
/**
* Creates a new deprecation logger based on the parent logger. Automatically
* prefixes the logger name with "deprecation", if it starts with "org.elasticsearch.",
* it replaces "org.elasticsearch" with "org.elasticsearch.deprecation" to maintain
* the "org.elasticsearch" namespace.
*/
public DeprecationLogger(ESLogger parentLogger) {
public DeprecationLogger(Logger parentLogger) {
String name = parentLogger.getName();
if (name.startsWith("org.elasticsearch")) {
name = name.replace("org.elasticsearch.", "org.elasticsearch.deprecation.");
} else {
name = "deprecation." + name;
}
this.logger = ESLoggerFactory.getLogger(parentLogger.getPrefix(), name);
this.logger = LogManager.getLogger(name, parentLogger.getMessageFactory());
}
/**
@ -113,27 +120,27 @@ public class DeprecationLogger {
* Logs a deprecated message to the deprecation log, as well as to the local {@link ThreadContext}.
*
* @param threadContexts The node's {@link ThreadContext} (outside of concurrent tests, this should only ever have one context).
* @param msg The deprecation message.
* @param message The deprecation message.
* @param params The parameters used to fill in the message, if any exist.
*/
@SuppressLoggerChecks(reason = "safely delegates to logger")
void deprecated(Set<ThreadContext> threadContexts, String msg, Object... params) {
void deprecated(Set<ThreadContext> threadContexts, String message, Object... params) {
Iterator<ThreadContext> iterator = threadContexts.iterator();
if (iterator.hasNext()) {
final String formattedMsg = LoggerMessageFormat.format(msg, params);
final String formattedMessage = LoggerMessageFormat.format(message, params);
while (iterator.hasNext()) {
try {
iterator.next().addResponseHeader(DEPRECATION_HEADER, formattedMsg);
iterator.next().addResponseHeader(DEPRECATION_HEADER, formattedMessage);
} catch (IllegalStateException e) {
// ignored; it should be removed shortly
}
}
logger.debug(formattedMsg);
logger.warn(formattedMessage);
} else {
logger.debug(msg, params);
logger.warn(message, params);
}
}
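For context, a minimal usage sketch of the rewritten class (logger name and setting keys are illustrative):

import org.apache.logging.log4j.LogManager;
import org.elasticsearch.common.logging.DeprecationLogger;

class DeprecationSketch {
    // per the renaming rule above, this resolves to the
    // "org.elasticsearch.deprecation.example" logger
    private final DeprecationLogger deprecationLogger =
            new DeprecationLogger(LogManager.getLogger("org.elasticsearch.example"));

    void warnOnce() {
        deprecationLogger.deprecated("Setting [{}] is deprecated, use [{}] instead", "old.key", "new.key");
    }
}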

View File

@ -1,208 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.elasticsearch.common.SuppressLoggerChecks;
import static org.elasticsearch.common.logging.LoggerMessageFormat.format;
/**
* Elasticsearch's logger wrapper.
*/
@SuppressLoggerChecks(reason = "safely delegates to itself")
public class ESLogger {
private static final String FQCN = ESLogger.class.getName();
private final String prefix;
private final Logger logger;
public ESLogger(String prefix, Logger logger) {
this.prefix = prefix;
this.logger = logger;
}
/**
* The prefix of the log.
*/
public String getPrefix() {
return this.prefix;
}
/**
* Fetch the underlying logger so we can look at it. Only exists for testing.
*/
Logger getLogger() {
return logger;
}
/**
* Set the level of the logger. If the new level is null, the logger will inherit its level from its nearest ancestor with a non-null
* level.
*/
public void setLevel(String level) {
if (level == null) {
logger.setLevel(null);
} else if ("error".equalsIgnoreCase(level)) {
logger.setLevel(Level.ERROR);
} else if ("warn".equalsIgnoreCase(level)) {
logger.setLevel(Level.WARN);
} else if ("info".equalsIgnoreCase(level)) {
logger.setLevel(Level.INFO);
} else if ("debug".equalsIgnoreCase(level)) {
logger.setLevel(Level.DEBUG);
} else if ("trace".equalsIgnoreCase(level)) {
logger.setLevel(Level.TRACE);
}
}
/**
* The level of this logger. If null then the logger is inheriting its level from its nearest ancestor with a non-null level.
*/
public String getLevel() {
if (logger.getLevel() == null) {
return null;
}
return logger.getLevel().toString();
}
/**
* The name of this logger.
*/
public String getName() {
return logger.getName();
}
/**
* Returns {@code true} if a TRACE level message should be logged.
*/
public boolean isTraceEnabled() {
return logger.isTraceEnabled();
}
/**
* Returns {@code true} if a DEBUG level message should be logged.
*/
public boolean isDebugEnabled() {
return logger.isDebugEnabled();
}
/**
* Returns {@code true} if an INFO level message should be logged.
*/
public boolean isInfoEnabled() {
return logger.isInfoEnabled();
}
/**
* Returns {@code true} if a WARN level message should be logged.
*/
public boolean isWarnEnabled() {
return logger.isEnabledFor(Level.WARN);
}
/**
* Returns {@code true} if an ERROR level message should be logged.
*/
public boolean isErrorEnabled() {
return logger.isEnabledFor(Level.ERROR);
}
/**
* Logs a TRACE level message.
*/
public void trace(String msg, Object... params) {
trace(msg, null, params);
}
/**
* Logs a TRACE level message with an exception.
*/
public void trace(String msg, Throwable cause, Object... params) {
if (isTraceEnabled()) {
logger.log(FQCN, Level.TRACE, format(prefix, msg, params), cause);
}
}
/**
* Logs a DEBUG level message.
*/
public void debug(String msg, Object... params) {
debug(msg, null, params);
}
/**
* Logs a DEBUG level message with an exception.
*/
public void debug(String msg, Throwable cause, Object... params) {
if (isDebugEnabled()) {
logger.log(FQCN, Level.DEBUG, format(prefix, msg, params), cause);
}
}
/**
* Logs an INFO level message.
*/
public void info(String msg, Object... params) {
info(msg, null, params);
}
/**
* Logs an INFO level message with an exception.
*/
public void info(String msg, Throwable cause, Object... params) {
if (isInfoEnabled()) {
logger.log(FQCN, Level.INFO, format(prefix, msg, params), cause);
}
}
/**
* Logs a WARN level message.
*/
public void warn(String msg, Object... params) {
warn(msg, null, params);
}
/**
* Logs a WARN level message with an exception.
*/
public void warn(String msg, Throwable cause, Object... params) {
if (isWarnEnabled()) {
logger.log(FQCN, Level.WARN, format(prefix, msg, params), cause);
}
}
/**
* Logs a ERROR level message.
*/
public void error(String msg, Object... params) {
error(msg, null, params);
}
/**
* Logs an ERROR level message with an exception.
*/
public void error(String msg, Throwable cause, Object... params) {
if (isErrorEnabled()) {
logger.log(FQCN, Level.ERROR, format(prefix, msg, params), cause);
}
}
}

View File

@ -19,31 +19,46 @@
package org.elasticsearch.common.logging;
import org.apache.log4j.Logger;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.MessageFactory;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import java.util.Locale;
import java.util.function.Function;
/**
* Factory to get {@link ESLogger}s
* Factory to get {@link Logger}s
*/
public abstract class ESLoggerFactory {
public static final Setting<LogLevel> LOG_DEFAULT_LEVEL_SETTING =
new Setting<>("logger.level", LogLevel.INFO.name(), LogLevel::parse, Property.NodeScope);
public static final Setting<LogLevel> LOG_LEVEL_SETTING =
Setting.prefixKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse,
public static final Setting<Level> LOG_DEFAULT_LEVEL_SETTING =
new Setting<>("logger.level", Level.INFO.name(), Level::valueOf, Property.NodeScope);
public static final Setting<Level> LOG_LEVEL_SETTING =
Setting.prefixKeySetting("logger.", Level.INFO.name(), Level::valueOf,
Property.Dynamic, Property.NodeScope);
public static ESLogger getLogger(String prefix, String name) {
prefix = prefix == null ? null : prefix.intern();
public static Logger getLogger(String prefix, String name) {
name = name.intern();
return new ESLogger(prefix, Logger.getLogger(name));
final Logger logger = getLogger(new PrefixMessageFactory(), name);
final MessageFactory factory = logger.getMessageFactory();
// in some cases, we initialize the logger before we are ready to set the prefix
// we can not re-initialize the logger, so the above getLogger might return an existing
// instance without the prefix set; thus, we hack around this by resetting the prefix
if (prefix != null && factory instanceof PrefixMessageFactory) {
((PrefixMessageFactory) factory).setPrefix(prefix.intern());
}
return logger;
}
public static ESLogger getLogger(String name) {
return getLogger(null, name);
public static Logger getLogger(MessageFactory messageFactory, String name) {
return LogManager.getLogger(name, messageFactory);
}
public static Logger getLogger(String name) {
return getLogger((String)null, name);
}
public static DeprecationLogger getDeprecationLogger(String name) {
@ -54,18 +69,12 @@ public abstract class ESLoggerFactory {
return new DeprecationLogger(getLogger(prefix, name));
}
public static ESLogger getRootLogger() {
return new ESLogger(null, Logger.getRootLogger());
public static Logger getRootLogger() {
return LogManager.getRootLogger();
}
private ESLoggerFactory() {
// Utility class can't be built.
}
public enum LogLevel {
WARN, TRACE, INFO, DEBUG, ERROR;
public static LogLevel parse(String level) {
return valueOf(level.toUpperCase(Locale.ROOT));
}
}
}
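The removed LogLevel enum is subsumed by org.apache.logging.log4j.Level; a small sketch of the parsing behavior the settings above rely on:

import org.apache.logging.log4j.Level;

class LevelParsingSketch {
    static Level parse(String level) {
        // Level.valueOf upper-cases its argument, so "info", "Info", and "INFO" all
        // resolve to Level.INFO; unknown names throw IllegalArgumentException,
        // mirroring the old LogLevel.parse(level.toUpperCase(Locale.ROOT)) behavior
        return Level.valueOf(level);
    }
}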

View File

@ -19,12 +19,20 @@
package org.elasticsearch.common.logging;
import org.apache.log4j.Java9Hack;
import org.apache.log4j.PropertyConfigurator;
import org.apache.lucene.util.Constants;
import org.elasticsearch.ElasticsearchException;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.config.AbstractConfiguration;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;
import org.apache.logging.log4j.core.config.composite.CompositeConfiguration;
import org.apache.logging.log4j.core.config.properties.PropertiesConfiguration;
import org.apache.logging.log4j.core.config.properties.PropertiesConfigurationFactory;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.env.Environment;
import java.io.IOException;
@ -34,144 +42,62 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.Strings.cleanPath;
/**
* Configures log4j with a special set of replacements.
*/
public class LogConfigurator {
static final List<String> ALLOWED_SUFFIXES = Arrays.asList(".yml", ".yaml", ".json", ".properties");
private static final Map<String, String> REPLACEMENTS;
static {
Map<String, String> replacements = new HashMap<>();
// Appenders
replacements.put("async", "org.apache.log4j.AsyncAppender");
replacements.put("console", ConsoleAppender.class.getName());
replacements.put("dailyRollingFile", "org.apache.log4j.DailyRollingFileAppender");
replacements.put("externallyRolledFile", "org.apache.log4j.ExternallyRolledFileAppender");
replacements.put("extrasRollingFile", "org.apache.log4j.rolling.RollingFileAppender");
replacements.put("file", "org.apache.log4j.FileAppender");
replacements.put("jdbc", "org.apache.log4j.jdbc.JDBCAppender");
replacements.put("jms", "org.apache.log4j.net.JMSAppender");
replacements.put("lf5", "org.apache.log4j.lf5.LF5Appender");
replacements.put("ntevent", "org.apache.log4j.nt.NTEventLogAppender");
replacements.put("null", "org.apache.log4j.NullAppender");
replacements.put("rollingFile", "org.apache.log4j.RollingFileAppender");
replacements.put("smtp", "org.apache.log4j.net.SMTPAppender");
replacements.put("socket", "org.apache.log4j.net.SocketAppender");
replacements.put("socketHub", "org.apache.log4j.net.SocketHubAppender");
replacements.put("syslog", "org.apache.log4j.net.SyslogAppender");
replacements.put("telnet", "org.apache.log4j.net.TelnetAppender");
replacements.put("terminal", TerminalAppender.class.getName());
// Policies
replacements.put("timeBased", "org.apache.log4j.rolling.TimeBasedRollingPolicy");
replacements.put("sizeBased", "org.apache.log4j.rolling.SizeBasedTriggeringPolicy");
// Layouts
replacements.put("simple", "org.apache.log4j.SimpleLayout");
replacements.put("html", "org.apache.log4j.HTMLLayout");
replacements.put("pattern", "org.apache.log4j.PatternLayout");
replacements.put("consolePattern", "org.apache.log4j.PatternLayout");
replacements.put("enhancedPattern", "org.apache.log4j.EnhancedPatternLayout");
replacements.put("ttcc", "org.apache.log4j.TTCCLayout");
replacements.put("xml", "org.apache.log4j.XMLLayout");
REPLACEMENTS = unmodifiableMap(replacements);
if (Constants.JRE_IS_MINIMUM_JAVA9) {
Java9Hack.fixLog4j();
}
}
private static boolean loaded;
/**
* Consolidates settings and converts them into actual log4j settings, then initializes loggers and appenders.
* @param settings custom settings that should be applied
* @param resolveConfig controls whether the logging config file should be read as well
*/
public static void configure(Settings settings, boolean resolveConfig) {
if (loaded) {
return;
}
loaded = true;
// TODO: this is partly a copy of InternalSettingsPreparer...we should pass in Environment and not do all this...
Environment environment = new Environment(settings);
Settings.Builder settingsBuilder = Settings.builder();
if (resolveConfig) {
resolveConfig(environment, settingsBuilder);
}
// add custom settings after config was added so that they are not overwritten by config
settingsBuilder.put(settings);
settingsBuilder.replacePropertyPlaceholders();
Properties props = new Properties();
for (Map.Entry<String, String> entry : settingsBuilder.build().getAsMap().entrySet()) {
String key = "log4j." + entry.getKey();
String value = entry.getValue();
value = REPLACEMENTS.getOrDefault(value, value);
if (key.endsWith(".value")) {
props.setProperty(key.substring(0, key.length() - ".value".length()), value);
} else if (key.endsWith(".type")) {
props.setProperty(key.substring(0, key.length() - ".type".length()), value);
} else {
props.setProperty(key, value);
}
}
// ensure explicit path to logs dir exists
props.setProperty("log4j.path.logs", cleanPath(environment.logsFile().toAbsolutePath().toString()));
PropertyConfigurator.configure(props);
// we initialize the status logger immediately otherwise Log4j will complain when we try to get the context
final ConfigurationBuilder<BuiltConfiguration> builder = ConfigurationBuilderFactory.newConfigurationBuilder();
builder.setStatusLevel(Level.ERROR);
Configurator.initialize(builder.build());
}
/**
* sets the loaded flag to false so that logging configuration can be
* overridden. Should only be used in tests.
* for triggering class initialization
*/
static void reset() {
loaded = false;
public static void init() {
}
static void resolveConfig(Environment env, final Settings.Builder settingsBuilder) {
public static void configure(final Environment environment) throws IOException {
final Settings settings = environment.settings();
try {
Set<FileVisitOption> options = EnumSet.of(FileVisitOption.FOLLOW_LINKS);
Files.walkFileTree(env.configFile(), options, Integer.MAX_VALUE, new SimpleFileVisitor<Path>() {
setLogConfigurationSystemProperty(environment, settings);
final LoggerContext context = (LoggerContext) LogManager.getContext(false);
final Set<FileVisitOption> options = EnumSet.of(FileVisitOption.FOLLOW_LINKS);
final List<AbstractConfiguration> configurations = new ArrayList<>();
final PropertiesConfigurationFactory factory = new PropertiesConfigurationFactory();
Files.walkFileTree(environment.configFile(), options, Integer.MAX_VALUE, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
String fileName = file.getFileName().toString();
if (fileName.startsWith("logging.")) {
for (String allowedSuffix : ALLOWED_SUFFIXES) {
if (fileName.endsWith(allowedSuffix)) {
loadConfig(file, settingsBuilder);
break;
}
}
if (file.getFileName().toString().equals("log4j2.properties")) {
configurations.add((PropertiesConfiguration) factory.getConfiguration(file.toString(), file.toUri()));
}
return FileVisitResult.CONTINUE;
}
});
} catch (IOException ioe) {
throw new ElasticsearchException("Failed to load logging configuration", ioe);
context.start(new CompositeConfiguration(configurations));
if (ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.exists(settings)) {
Loggers.setLevel(ESLoggerFactory.getRootLogger(), ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings));
}
final Map<String, String> levels = settings.filter(ESLoggerFactory.LOG_LEVEL_SETTING::match).getAsMap();
for (String key : levels.keySet()) {
final Level level = ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings);
Loggers.setLevel(Loggers.getLogger(key.substring("logger.".length())), level);
}
}
static void loadConfig(Path file, Settings.Builder settingsBuilder) {
try {
settingsBuilder.loadFromPath(file);
} catch (IOException | SettingsException | NoClassDefFoundError e) {
// ignore
}
@SuppressForbidden(reason = "sets system property for logging configuration")
private static void setLogConfigurationSystemProperty(Environment environment, Settings settings) {
System.setProperty("es.logs", environment.logsFile().resolve(ClusterName.CLUSTER_NAME_SETTING.get(settings).value()).toString());
}
}
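A minimal log4j2.properties of the shape this loader consumes (contents illustrative; ${sys:es.logs} is the property set by setLogConfigurationSystemProperty above):

status = error

appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n

appender.file.type = File
appender.file.name = file
appender.file.fileName = ${sys:es.logs}.log
appender.file.layout.type = PatternLayout
appender.file.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n

rootLogger.level = info
rootLogger.appenderRef.console.ref = console
rootLogger.appenderRef.file.ref = file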

View File

@ -19,15 +19,19 @@
package org.elasticsearch.common.logging;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.config.Configuration;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.LoggerConfig;
import org.apache.logging.log4j.message.MessageFactory;
import org.elasticsearch.common.Classes;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.node.Node;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
@ -39,6 +43,11 @@ import static org.elasticsearch.common.util.CollectionUtils.asArrayList;
*/
public class Loggers {
static {
// ensure that the status logger is configured before we touch any loggers
LogConfigurator.init();
}
private static final String commonPrefix = System.getProperty("es.logger.prefix", "org.elasticsearch.");
public static final String SPACE = " ";
@ -57,51 +66,29 @@ public class Loggers {
return consoleLoggingEnabled;
}
public static ESLogger getLogger(Class<?> clazz, Settings settings, ShardId shardId, String... prefixes) {
public static Logger getLogger(Class<?> clazz, Settings settings, ShardId shardId, String... prefixes) {
return getLogger(clazz, settings, shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
}
/**
* Just like {@link #getLogger(Class, org.elasticsearch.common.settings.Settings,ShardId,String...)} but String loggerName instead of
* Just like {@link #getLogger(Class, org.elasticsearch.common.settings.Settings, ShardId, String...)} but String loggerName instead of
* Class.
*/
public static ESLogger getLogger(String loggerName, Settings settings, ShardId shardId, String... prefixes) {
public static Logger getLogger(String loggerName, Settings settings, ShardId shardId, String... prefixes) {
return getLogger(loggerName, settings,
asArrayList(shardId.getIndexName(), Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
}
public static ESLogger getLogger(Class<?> clazz, Settings settings, Index index, String... prefixes) {
public static Logger getLogger(Class<?> clazz, Settings settings, Index index, String... prefixes) {
return getLogger(clazz, settings, asArrayList(SPACE, index.getName(), prefixes).toArray(new String[0]));
}
public static ESLogger getLogger(Class<?> clazz, Settings settings, String... prefixes) {
public static Logger getLogger(Class<?> clazz, Settings settings, String... prefixes) {
return getLogger(buildClassLoggerName(clazz), settings, prefixes);
}
@SuppressForbidden(reason = "using localhost for logging on which host it is is fine")
private static InetAddress getHostAddress() {
try {
return InetAddress.getLocalHost();
} catch (UnknownHostException e) {
return null;
}
}
@SuppressForbidden(reason = "do not know what this method does")
public static ESLogger getLogger(String loggerName, Settings settings, String... prefixes) {
public static Logger getLogger(String loggerName, Settings settings, String... prefixes) {
List<String> prefixesList = new ArrayList<>();
if (settings.getAsBoolean("logger.logHostAddress", false)) {
final InetAddress addr = getHostAddress();
if (addr != null) {
prefixesList.add(addr.getHostAddress());
}
}
if (settings.getAsBoolean("logger.logHostName", false)) {
final InetAddress addr = getHostAddress();
if (addr != null) {
prefixesList.add(addr.getHostName());
}
}
if (Node.NODE_NAME_SETTING.exists(settings)) {
prefixesList.add(Node.NODE_NAME_SETTING.get(settings));
}
@ -111,23 +98,23 @@ public class Loggers {
return getLogger(getLoggerName(loggerName), prefixesList.toArray(new String[prefixesList.size()]));
}
public static ESLogger getLogger(ESLogger parentLogger, String s) {
return ESLoggerFactory.getLogger(parentLogger.getPrefix(), getLoggerName(parentLogger.getName() + s));
public static Logger getLogger(Logger parentLogger, String s) {
return ESLoggerFactory.getLogger(parentLogger.<MessageFactory>getMessageFactory(), getLoggerName(parentLogger.getName() + s));
}
public static ESLogger getLogger(String s) {
public static Logger getLogger(String s) {
return ESLoggerFactory.getLogger(getLoggerName(s));
}
public static ESLogger getLogger(Class<?> clazz) {
public static Logger getLogger(Class<?> clazz) {
return ESLoggerFactory.getLogger(getLoggerName(buildClassLoggerName(clazz)));
}
public static ESLogger getLogger(Class<?> clazz, String... prefixes) {
public static Logger getLogger(Class<?> clazz, String... prefixes) {
return getLogger(buildClassLoggerName(clazz), prefixes);
}
public static ESLogger getLogger(String name, String... prefixes) {
public static Logger getLogger(String name, String... prefixes) {
String prefix = null;
if (prefixes != null && prefixes.length > 0) {
StringBuilder sb = new StringBuilder();
@ -148,6 +135,32 @@ public class Loggers {
return ESLoggerFactory.getLogger(prefix, getLoggerName(name));
}
/**
* Set the level of the logger. If the new level is null, the logger will inherit its level from its nearest ancestor with a non-null
* level.
*/
public static void setLevel(Logger logger, String level) {
final Level l;
if (level == null) {
l = null;
} else {
l = Level.valueOf(level);
}
setLevel(logger, l);
}
public static void setLevel(Logger logger, Level level) {
if (!"".equals(logger.getName())) {
Configurator.setLevel(logger.getName(), level);
} else {
LoggerContext ctx = LoggerContext.getContext(false);
Configuration config = ctx.getConfiguration();
LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName());
loggerConfig.setLevel(level);
ctx.updateLoggers();
}
}
private static String buildClassLoggerName(Class<?> clazz) {
String name = clazz.getName();
if (name.startsWith("org.elasticsearch.")) {
@ -162,4 +175,5 @@ public class Loggers {
}
return commonPrefix + name;
}
}
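A sketch of the two paths through the new setLevel (logger names illustrative):

import org.apache.logging.log4j.Level;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.logging.Loggers;

class LevelUpdateSketch {
    void updateLevels() {
        // named logger: delegates to Configurator.setLevel
        Loggers.setLevel(Loggers.getLogger("org.elasticsearch.discovery"), Level.DEBUG);
        // root logger (empty name): updates its LoggerConfig and refreshes the context
        Loggers.setLevel(ESLoggerFactory.getRootLogger(), "warn");
    }
}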

View File

@ -0,0 +1,221 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging;
import org.apache.logging.log4j.message.Message;
import org.apache.logging.log4j.message.MessageFactory2;
import org.apache.logging.log4j.message.ObjectMessage;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.message.SimpleMessage;
public class PrefixMessageFactory implements MessageFactory2 {
private String prefix = "";
public String getPrefix() {
return prefix;
}
public void setPrefix(String prefix) {
this.prefix = prefix;
}
@Override
public Message newMessage(Object message) {
return new PrefixObjectMessage(prefix, message);
}
private static class PrefixObjectMessage extends ObjectMessage {
private final String prefix;
private final Object object;
private String prefixObjectString;
private PrefixObjectMessage(String prefix, Object object) {
super(object);
this.prefix = prefix;
this.object = object;
}
@Override
public String getFormattedMessage() {
if (prefixObjectString == null) {
prefixObjectString = prefix + super.getFormattedMessage();
}
return prefixObjectString;
}
@Override
public void formatTo(StringBuilder buffer) {
buffer.append(prefix);
super.formatTo(buffer);
}
@Override
public Object[] getParameters() {
return new Object[]{prefix, object};
}
}
@Override
public Message newMessage(String message) {
return new PrefixSimpleMessage(prefix, message);
}
private static class PrefixSimpleMessage extends SimpleMessage {
private final String prefix;
private String prefixMessage;
PrefixSimpleMessage(String prefix, String message) {
super(message);
this.prefix = prefix;
}
PrefixSimpleMessage(String prefix, CharSequence charSequence) {
super(charSequence);
this.prefix = prefix;
}
@Override
public String getFormattedMessage() {
if (prefixMessage == null) {
prefixMessage = prefix + super.getFormattedMessage();
}
return prefixMessage;
}
@Override
public void formatTo(StringBuilder buffer) {
buffer.append(prefix);
super.formatTo(buffer);
}
@Override
public int length() {
return prefixMessage.length();
}
@Override
public char charAt(int index) {
return prefixMessage.charAt(index);
}
@Override
public CharSequence subSequence(int start, int end) {
return prefixMessage.subSequence(start, end);
}
}
@Override
public Message newMessage(String message, Object... params) {
return new PrefixParameterizedMessage(prefix, message, params);
}
private static class PrefixParameterizedMessage extends ParameterizedMessage {
private static ThreadLocal<StringBuilder> threadLocalStringBuilder = ThreadLocal.withInitial(StringBuilder::new);
private final String prefix;
private String formattedMessage;
private PrefixParameterizedMessage(String prefix, String messagePattern, Object... arguments) {
super(messagePattern, arguments);
this.prefix = prefix;
}
@Override
public String getFormattedMessage() {
if (formattedMessage == null) {
final StringBuilder buffer = threadLocalStringBuilder.get();
buffer.setLength(0);
formatTo(buffer);
formattedMessage = buffer.toString();
}
return formattedMessage;
}
@Override
public void formatTo(StringBuilder buffer) {
buffer.append(prefix);
super.formatTo(buffer);
}
}
@Override
public Message newMessage(CharSequence charSequence) {
return new PrefixSimpleMessage(prefix, charSequence);
}
@Override
public Message newMessage(String message, Object p0) {
return new PrefixParameterizedMessage(prefix, message, p0);
}
@Override
public Message newMessage(String message, Object p0, Object p1) {
return new PrefixParameterizedMessage(prefix, message, p0, p1);
}
@Override
public Message newMessage(String message, Object p0, Object p1, Object p2) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2);
}
@Override
public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3);
}
@Override
public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3, Object p4) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4);
}
@Override
public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5);
}
@Override
public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5, p6);
}
@Override
public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5, p6, p7);
}
@Override
public Message newMessage(
String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7, Object p8) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5, p6, p7, p8);
}
@Override
public Message newMessage(
String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7, Object p8, Object p9) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9);
}
}
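How the factory is intended to be wired up, sketched (names illustrative); note the caveat in ESLoggerFactory above that a logger keeps the message factory it was first created with:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

class PrefixSketch {
    Logger prefixedLogger() {
        PrefixMessageFactory factory = new PrefixMessageFactory();
        factory.setPrefix("[node-1] ");
        // every message produced through this logger is wrapped in a Prefix*Message
        Logger logger = LogManager.getLogger("org.elasticsearch.example.prefixed", factory);
        logger.info("starting up"); // renders as "[node-1] starting up"
        return logger;
    }
}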

View File

@ -1,44 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.spi.LoggingEvent;
import org.elasticsearch.cli.Terminal;
/**
* TerminalAppender logs events to Terminal.DEFAULT. It is used, for example, by the PluginCli.
* */
public class TerminalAppender extends AppenderSkeleton {
@Override
protected void append(LoggingEvent event) {
Terminal.DEFAULT.println(event.getRenderedMessage());
}
@Override
public void close() {
}
@Override
public boolean requiresLayout() {
return false;
}
}

View File

@ -19,8 +19,8 @@
package org.elasticsearch.common.lucene;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.InfoStream;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
/** An InfoStream (for Lucene's IndexWriter) that redirects
@ -30,12 +30,12 @@ public final class LoggerInfoStream extends InfoStream {
/** Used for component-specific logging: */
/** Logger for everything */
private final ESLogger logger;
private final Logger logger;
/** Logger for IndexFileDeleter */
private final ESLogger ifdLogger;
private final Logger ifdLogger;
public LoggerInfoStream(ESLogger parentLogger) {
public LoggerInfoStream(Logger parentLogger) {
logger = Loggers.getLogger(parentLogger, ".lucene.iw");
ifdLogger = Loggers.getLogger(parentLogger, ".lucene.iw.ifd");
}
@ -52,7 +52,7 @@ public final class LoggerInfoStream extends InfoStream {
return getLogger(component).isTraceEnabled() && component.equals("TP") == false;
}
private ESLogger getLogger(String component) {
private Logger getLogger(String component) {
if (component.equals("IFD")) {
return ifdLogger;
} else {

View File

@ -19,6 +19,8 @@
package org.elasticsearch.common.lucene;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.codecs.CodecUtil;
@ -67,7 +69,6 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.index.analysis.AnalyzerScope;
import org.elasticsearch.index.analysis.NamedAnalyzer;
@ -104,14 +105,14 @@ public class Lucene {
public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(0, EMPTY_SCORE_DOCS, 0.0f);
public static Version parseVersion(@Nullable String version, Version defaultVersion, ESLogger logger) {
public static Version parseVersion(@Nullable String version, Version defaultVersion, Logger logger) {
if (version == null) {
return defaultVersion;
}
try {
return Version.parse(version);
} catch (ParseException e) {
logger.warn("no version match {}, default to {}", e, version, defaultVersion);
logger.warn(new ParameterizedMessage("no version match {}, default to {}", version, defaultVersion), e);
return defaultVersion;
}
}

View File

@ -19,7 +19,7 @@
package org.elasticsearch.common.network;
import org.elasticsearch.common.logging.ESLogger;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import java.io.IOException;
@ -36,7 +36,7 @@ import java.util.Locale;
*/
final class IfConfig {
private static final ESLogger logger = Loggers.getLogger(IfConfig.class);
private static final Logger logger = Loggers.getLogger(IfConfig.class);
private static final String INDENT = " ";
/** log interface configuration at debug level, if it's enabled */

View File

@ -19,8 +19,8 @@
package org.elasticsearch.common.settings;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.search.spell.LevensteinDistance;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.collect.Tuple;
@ -35,7 +35,6 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.BiConsumer;
@ -129,7 +128,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
settingUpdater.getValue(current, previous);
} catch (RuntimeException ex) {
exceptions.add(ex);
logger.debug("failed to prepareCommit settings for [{}]", ex, settingUpdater);
logger.debug(new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex);
}
}
// here we are exhaustive and record all settings that failed.
@ -157,7 +156,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
try {
applyRunnables.add(settingUpdater.updater(current, previous));
} catch (Exception ex) {
logger.warn("failed to prepareCommit settings for [{}]", ex, settingUpdater);
logger.warn(new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex);
throw ex;
}
}
@ -521,7 +520,8 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
}
} catch (IllegalArgumentException ex) {
changed = true;
logger.warn("found invalid setting: {} value: {} - archiving",ex , entry.getKey(), entry.getValue());
logger.warn(
new ParameterizedMessage("found invalid setting: {} value: {} - archiving", entry.getKey(), entry.getValue()), ex);
/*
* We put them back in here such that tools can check from the outside if there are any indices with broken settings. The
* setting can remain there but we want users to be aware that some of their settings are broken and they can research why

View File

@ -45,6 +45,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAl
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting.Property;
@ -132,7 +133,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
if (ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).exists(settings) == false) {
builder.putNull(key);
} else {
builder.put(key, ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings).name());
builder.put(key, ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings));
}
}
}
@ -144,12 +145,18 @@ public final class ClusterSettings extends AbstractScopedSettings {
for (String key : value.getAsMap().keySet()) {
assert loggerPredicate.test(key);
String component = key.substring("logger.".length());
if ("level".equals(component)) {
continue;
}
if ("_root".equals(component)) {
final String rootLevel = value.get(key);
ESLoggerFactory.getRootLogger().setLevel(rootLevel == null ? ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings)
.name() : rootLevel);
if (rootLevel == null) {
Loggers.setLevel(ESLoggerFactory.getRootLogger(), ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings));
} else {
ESLoggerFactory.getLogger(component).setLevel(value.get(key));
Loggers.setLevel(ESLoggerFactory.getRootLogger(), rootLevel);
}
} else {
Loggers.setLevel(ESLoggerFactory.getLogger(component), value.get(key));
}
}
}
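The settings shapes this consumer now handles, sketched as a hypothetical dynamic update (keys and values illustrative):

import org.elasticsearch.common.settings.Settings;

class LoggerSettingsSketch {
    Settings dynamicLoggerUpdate() {
        // "_root" routes to the root logger; any other suffix becomes a named logger;
        // a missing value falls back to logger.level or the logger's ancestor
        return Settings.builder()
                .put("logger._root", "info")
                .put("logger.org.elasticsearch.discovery", "DEBUG")
                .build();
    }
}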

View File

@ -18,6 +18,7 @@
*/
package org.elasticsearch.common.settings;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.support.ToXContentToBytes;
@ -26,7 +27,6 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.unit.ByteSizeValue;
@ -374,7 +374,7 @@ public class Setting<T> extends ToXContentToBytes {
/**
* Build a new updater with a noop validator.
*/
final AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, ESLogger logger) {
final AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, Logger logger) {
return newUpdater(consumer, logger, (s) -> {});
}
@ -382,7 +382,7 @@ public class Setting<T> extends ToXContentToBytes {
* Build the updater responsible for validating new values, logging the new
* value, and eventually setting the value where it belongs.
*/
AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, ESLogger logger, Consumer<T> validator) {
AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, Logger logger, Consumer<T> validator) {
if (isDynamic()) {
return new Updater(consumer, logger, validator);
} else {
@ -395,7 +395,7 @@ public class Setting<T> extends ToXContentToBytes {
* and its usage for details.
*/
static <A, B> AbstractScopedSettings.SettingUpdater<Tuple<A, B>> compoundUpdater(final BiConsumer<A, B> consumer,
final Setting<A> aSetting, final Setting<B> bSetting, ESLogger logger) {
final Setting<A> aSetting, final Setting<B> bSetting, Logger logger) {
final AbstractScopedSettings.SettingUpdater<A> aSettingUpdater = aSetting.newUpdater(null, logger);
final AbstractScopedSettings.SettingUpdater<B> bSettingUpdater = bSetting.newUpdater(null, logger);
return new AbstractScopedSettings.SettingUpdater<Tuple<A, B>>() {
@ -424,10 +424,10 @@ public class Setting<T> extends ToXContentToBytes {
private final class Updater implements AbstractScopedSettings.SettingUpdater<T> {
private final Consumer<T> consumer;
private final ESLogger logger;
private final Logger logger;
private final Consumer<T> accept;
public Updater(Consumer<T> consumer, ESLogger logger, Consumer<T> accept) {
public Updater(Consumer<T> consumer, Logger logger, Consumer<T> accept) {
this.consumer = consumer;
this.logger = logger;
this.accept = accept;
@ -707,7 +707,7 @@ public class Setting<T> extends ToXContentToBytes {
}
@Override
public AbstractScopedSettings.SettingUpdater<Settings> newUpdater(Consumer<Settings> consumer, ESLogger logger,
public AbstractScopedSettings.SettingUpdater<Settings> newUpdater(Consumer<Settings> consumer, Logger logger,
Consumer<Settings> validator) {
if (isDynamic() == false) {
throw new IllegalStateException("setting [" + getKey() + "] is not dynamic");
@ -831,7 +831,7 @@ public class Setting<T> extends ToXContentToBytes {
}
@Override
AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, ESLogger logger, Consumer<T> validator) {
AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, Logger logger, Consumer<T> validator) {
throw new UnsupportedOperationException("Affix settings can't be updated. Use #getConcreteSetting for updating.");
}

View File

@ -19,9 +19,9 @@
package org.elasticsearch.common.settings;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.inject.Binder;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -51,7 +51,7 @@ public class SettingsModule implements Module {
private final Map<String, Setting<?>> indexSettings = new HashMap<>();
private static final Predicate<String> TRIBE_CLIENT_NODE_SETTINGS_PREDICATE = (s) -> s.startsWith("tribe.")
&& TribeService.TRIBE_SETTING_KEYS.contains(s) == false;
private final ESLogger logger;
private final Logger logger;
private final IndexScopedSettings indexScopedSettings;
private final ClusterSettings clusterSettings;

View File

@ -19,9 +19,9 @@
package org.elasticsearch.common.util;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.NodeEnvironment;
@ -41,7 +41,7 @@ import java.nio.file.StandardCopyOption;
public class IndexFolderUpgrader {
private final NodeEnvironment nodeEnv;
private final Settings settings;
private final ESLogger logger = Loggers.getLogger(IndexFolderUpgrader.class);
private final Logger logger = Loggers.getLogger(IndexFolderUpgrader.class);
/**
* Creates a new upgrader instance
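Instance loggers keep the same shape; only the declared type changes, and Loggers.getLogger still resolves the logging category from the class. Outside of the Elasticsearch helpers, the plain Log4j 2 equivalent would be something like this sketch (class name illustrative):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class UpgraderDemo {
    private final Logger logger = LogManager.getLogger(UpgraderDemo.class);
}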

View File

@ -18,8 +18,8 @@
*/
package org.elasticsearch.common.util.concurrent;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.logging.ESLogger;
import java.util.Objects;
@ -36,7 +36,7 @@ public abstract class AbstractLifecycleRunnable extends AbstractRunnable {
/**
* The service's logger (note: this is passed in!).
*/
private final ESLogger logger;
private final Logger logger;
/**
* {@link AbstractLifecycleRunnable} must be aware of the actual {@code lifecycle} to react properly.
@ -45,7 +45,7 @@ public abstract class AbstractLifecycleRunnable extends AbstractRunnable {
* @param logger The logger to use when logging
* @throws NullPointerException if any parameter is {@code null}
*/
public AbstractLifecycleRunnable(Lifecycle lifecycle, ESLogger logger) {
public AbstractLifecycleRunnable(Lifecycle lifecycle, Logger logger) {
this.lifecycle = Objects.requireNonNull(lifecycle, "lifecycle must not be null");
this.logger = Objects.requireNonNull(logger, "logger must not be null");
}

View File

@ -18,8 +18,8 @@
*/
package org.elasticsearch.common.util.concurrent;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.logging.ESLogger;
import java.io.IOException;
import java.util.ArrayList;
@ -37,11 +37,11 @@ import java.util.function.Consumer;
* might be blocked until other items are processed
*/
public abstract class AsyncIOProcessor<Item> {
private final ESLogger logger;
private final Logger logger;
private final ArrayBlockingQueue<Tuple<Item, Consumer<Exception>>> queue;
private final Semaphore promiseSemaphore = new Semaphore(1);
protected AsyncIOProcessor(ESLogger logger, int queueSize) {
protected AsyncIOProcessor(Logger logger, int queueSize) {
this.logger = logger;
this.queue = new ArrayBlockingQueue<>(queueSize);
}

View File

@ -19,17 +19,15 @@
package org.elasticsearch.common.util.concurrent;
import org.elasticsearch.common.logging.ESLogger;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
/**
*/
public class LoggingRunnable implements Runnable {
private final Runnable runnable;
private final ESLogger logger;
private final Logger logger;
public LoggingRunnable(ESLogger logger, Runnable runnable) {
public LoggingRunnable(Logger logger, Runnable runnable) {
this.runnable = runnable;
this.logger = logger;
}
@ -39,7 +37,8 @@ public class LoggingRunnable implements Runnable {
try {
runnable.run();
} catch (Exception e) {
logger.warn("failed to execute [{}]", e, runnable.toString());
logger.warn(new ParameterizedMessage("failed to execute [{}]", runnable.toString()), e);
}
}
}
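This hunk shows the recurring call-site rewrite in this commit: ESLogger offered (String, Throwable, Object...) overloads, while the Log4j 2 Logger takes its format arguments inside a Message and the Throwable as the final parameter. A self-contained before/after sketch (names illustrative):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;

public class ParameterizedMessageDemo {
    private static final Logger logger = LogManager.getLogger(ParameterizedMessageDemo.class);

    public static void main(String[] args) {
        Exception e = new IllegalStateException("boom");
        // Old style: logger.warn("failed to execute [{}]", e, "task-1");
        // New style: format arguments live in the Message, and the
        // exception is passed separately so it renders as a stack trace.
        logger.warn(new ParameterizedMessage("failed to execute [{}]", "task-1"), e);
    }
}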

View File

@ -18,8 +18,9 @@
*/
package org.elasticsearch.discovery;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import java.util.Set;
@ -31,7 +32,7 @@ import java.util.Set;
*/
public class AckClusterStatePublishResponseHandler extends BlockingClusterStatePublishResponseHandler {
private static final ESLogger logger = ESLoggerFactory.getLogger(AckClusterStatePublishResponseHandler.class.getName());
private static final Logger logger = ESLoggerFactory.getLogger(AckClusterStatePublishResponseHandler.class.getName());
private final Discovery.AckListener ackListener;
@ -68,7 +69,7 @@ public class AckClusterStatePublishResponseHandler extends BlockingClusterStatePublishResponseHandler
ackListener.onNodeAck(node, e);
} catch (Exception inner) {
inner.addSuppressed(e);
logger.debug("error while processing ack for node [{}]", inner, node);
logger.debug(new ParameterizedMessage("error while processing ack for node [{}]", node), inner);
}
}
}
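Worth noting alongside the logging change: the original failure is attached to the secondary one via addSuppressed before the secondary is logged, so neither is lost. A minimal sketch of the pattern:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;

public class SuppressedDemo {
    private static final Logger logger = LogManager.getLogger(SuppressedDemo.class);

    static void notifyListener(Runnable listener, String node, Exception original) {
        try {
            listener.run();
        } catch (Exception inner) {
            inner.addSuppressed(original); // keep the first failure attached
            logger.debug(new ParameterizedMessage("error while processing ack for node [{}]", node), inner);
        }
    }
}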

View File

@ -19,6 +19,7 @@
package org.elasticsearch.discovery.local;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
@ -144,7 +145,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov
@Override
public void onFailure(String source, Exception e) {
logger.error("unexpected failure during [{}]", e, source);
logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
});
} else if (firstMaster != null) {
@ -173,7 +174,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov
@Override
public void onFailure(String source, Exception e) {
logger.error("unexpected failure during [{}]", e, source);
logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
});
@ -238,7 +239,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov
@Override
public void onFailure(String source, Exception e) {
logger.error("unexpected failure during [{}]", e, source);
logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
});
}
@ -329,7 +330,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov
newNodeSpecificClusterState = discovery.lastProcessedClusterState.readDiffFrom(StreamInput.wrap(clusterStateDiffBytes)).apply(discovery.lastProcessedClusterState);
logger.trace("sending diff cluster state version [{}] with size {} to [{}]", clusterState.version(), clusterStateDiffBytes.length, discovery.localNode().getName());
} catch (IncompatibleClusterStateVersionException ex) {
logger.warn("incompatible cluster state version [{}] - resending complete cluster state", ex, clusterState.version());
logger.warn(new ParameterizedMessage("incompatible cluster state version [{}] - resending complete cluster state", clusterState.version()), ex);
}
}
if (newNodeSpecificClusterState == null) {
@ -380,7 +381,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov
@Override
public void onFailure(String source, Exception e) {
logger.error("unexpected failure during [{}]", e, source);
logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e);
publishResponseHandler.onFailure(discovery.localNode(), e);
}
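One consequence of this rewrite is that the ParameterizedMessage is now allocated eagerly at every call site. For hot paths where the level is usually disabled, the Log4j 2 API (2.4+) also offers Supplier overloads that defer the allocation; a sketch:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyMessageDemo {
    private static final Logger logger = LogManager.getLogger(LazyMessageDemo.class);

    public static void main(String[] args) {
        Exception e = new RuntimeException("node left");
        String source = "zen-disco-node-left";
        // The lambda only runs if ERROR is enabled.
        logger.error((Supplier<?>) () -> new ParameterizedMessage(
                "unexpected failure during [{}]", source), e);
    }
}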

View File

@ -18,6 +18,8 @@
*/
package org.elasticsearch.discovery.zen;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.store.AlreadyClosedException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
@ -34,7 +36,6 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.unit.TimeValue;
@ -348,13 +349,13 @@ public class NodeJoinController extends AbstractComponent {
static class JoinTaskListener implements ClusterStateTaskListener {
final List<MembershipAction.JoinCallback> callbacks;
private final ESLogger logger;
private final Logger logger;
JoinTaskListener(MembershipAction.JoinCallback callback, ESLogger logger) {
JoinTaskListener(MembershipAction.JoinCallback callback, Logger logger) {
this(Collections.singletonList(callback), logger);
}
JoinTaskListener(List<MembershipAction.JoinCallback> callbacks, ESLogger logger) {
JoinTaskListener(List<MembershipAction.JoinCallback> callbacks, Logger logger) {
this.callbacks = callbacks;
this.logger = logger;
}
@ -365,7 +366,7 @@ public class NodeJoinController extends AbstractComponent {
try {
callback.onFailure(e);
} catch (Exception inner) {
logger.error("error handling task failure [{}]", inner, e);
logger.error(new ParameterizedMessage("error handling task failure [{}]", e), inner);
}
}
}
@ -376,7 +377,7 @@ public class NodeJoinController extends AbstractComponent {
try {
callback.onSuccess();
} catch (Exception e) {
logger.error("unexpected error during [{}]", e, source);
logger.error(new ParameterizedMessage("unexpected error during [{}]", source), e);
}
}
}
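A subtlety in the JoinTaskListener hunk above: the first exception (e) is used as a format argument, so it is rendered via toString in the message text, while inner occupies the dedicated throwable slot and gets the full stack trace. A sketch making the distinction concrete:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;

public class ThrowableSlotDemo {
    private static final Logger logger = LogManager.getLogger(ThrowableSlotDemo.class);

    public static void main(String[] args) {
        Exception original = new IllegalStateException("join failed");
        Exception inner = new RuntimeException("callback failed");
        // "original" is stringified into the message; "inner" is the
        // logged throwable and renders with its stack trace.
        logger.error(new ParameterizedMessage("error handling task failure [{}]", original), inner);
    }
}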

Some files were not shown because too many files have changed in this diff.