Avoid allocating log parameterized messages
This commit modifies the call sites that allocate a parameterized message to use a supplier so that allocations are avoided unless the log level is fine enough to emit the corresponding log message.
parent 7da0cdec42 · commit abf8a1a3f0
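The mechanical change is the same at every call site below: wrap the `ParameterizedMessage` construction in a Log4j 2 `Supplier` lambda so the message is only built when the level is actually enabled; the cast to `Supplier<?>` selects the supplier-based overload over `debug(Object, Throwable)`. A minimal before/after sketch (the class name and message text here are illustrative, not taken from the diff):

```java
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

class SupplierLoggingSketch {

    private static final Logger logger = LogManager.getLogger(SupplierLoggingSketch.class);

    void onFailure(String source, Exception e) {
        // Before: the ParameterizedMessage (and its varargs array) is
        // allocated on every call, even when DEBUG is disabled.
        logger.debug(new ParameterizedMessage("failed to perform [{}]", source), e);

        // After: construction is deferred behind a lambda; Log4j invokes the
        // supplier only if DEBUG is enabled, so a disabled level costs only
        // the small capturing lambda, not the message or its argument array.
        logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
    }
}
```

The explicit `(Supplier<?>)` cast appears throughout the diff because the lambda alone would otherwise match the `debug(Object, Throwable)` overload.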
TransportClusterHealthAction.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.admin.cluster.health;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.IndicesOptions;
@@ -106,7 +107,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<

             @Override
             public void onFailure(String source, Exception e) {
-                logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e);
+                logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
                 listener.onFailure(e);
             }
TransportClusterRerouteAction.java
@@ -21,6 +21,7 @@ package org.elasticsearch.action.admin.cluster.reroute;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -104,7 +105,7 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu

             @Override
             public void onFailure(String source, Exception e) {
-                logger.debug(new ParameterizedMessage("failed to perform [{}]", source), e);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
                 super.onFailure(source, e);
             }
TransportClusterUpdateSettingsAction.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.admin.cluster.settings;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
@@ -149,7 +150,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
             @Override
             public void onFailure(String source, Exception e) {
                 //if the reroute fails we only log
-                logger.debug(new ParameterizedMessage("failed to perform [{}]", source), e);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
                 listener.onFailure(new ElasticsearchException("reroute after update settings failed", e));
             }

@@ -167,7 +168,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct

             @Override
             public void onFailure(String source, Exception e) {
-                logger.debug(new ParameterizedMessage("failed to perform [{}]", source), e);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
                 super.onFailure(source, e);
             }
TransportCloseIndexAction.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.admin.indices.close;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DestructiveOperations;
@@ -109,7 +110,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn

             @Override
             public void onFailure(Exception t) {
-                logger.debug(new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices), t);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices), t);
                 listener.onFailure(t);
             }
         });
TransportDeleteIndexAction.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.admin.indices.delete;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DestructiveOperations;
@@ -101,7 +102,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete

             @Override
             public void onFailure(Exception t) {
-                logger.debug(new ParameterizedMessage("failed to delete indices [{}]", concreteIndices), t);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete indices [{}]", concreteIndices), t);
                 listener.onFailure(t);
             }
         });
TransportPutMappingAction.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.admin.indices.mapping.put;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -93,12 +94,12 @@ public class TransportPutMappingAction extends TransportMasterNodeAction<PutMapp

                 @Override
                 public void onFailure(Exception t) {
-                    logger.debug(new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", concreteIndices, request.type()), t);
+                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", concreteIndices, request.type()), t);
                     listener.onFailure(t);
                 }
             });
         } catch (IndexNotFoundException ex) {
-            logger.debug(new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", request.indices(), request.type()), ex);
+            logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", request.indices(), request.type()), ex);
             throw ex;
         }
     }
TransportOpenIndexAction.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.admin.indices.open;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DestructiveOperations;
@@ -94,7 +95,7 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction<OpenInde

             @Override
             public void onFailure(Exception t) {
-                logger.debug(new ParameterizedMessage("failed to open indices [{}]", (Object) concreteIndices), t);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to open indices [{}]", (Object) concreteIndices), t);
                 listener.onFailure(t);
             }
         });
TransportUpdateSettingsAction.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.admin.indices.settings.put;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -93,7 +94,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd

             @Override
             public void onFailure(Exception t) {
-                logger.debug(new ParameterizedMessage("failed to update settings on indices [{}]", (Object) concreteIndices), t);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to update settings on indices [{}]", (Object) concreteIndices), t);
                 listener.onFailure(t);
             }
         });
TransportDeleteIndexTemplateAction.java
@@ -19,6 +19,7 @@
 package org.elasticsearch.action.admin.indices.template.delete;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -74,7 +75,7 @@ public class TransportDeleteIndexTemplateAction extends TransportMasterNodeActio

             @Override
             public void onFailure(Exception e) {
-                logger.debug(new ParameterizedMessage("failed to delete templates [{}]", request.name()), e);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete templates [{}]", request.name()), e);
                 listener.onFailure(e);
             }
         });
TransportPutIndexTemplateAction.java
@@ -19,6 +19,7 @@
 package org.elasticsearch.action.admin.indices.template.put;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -95,7 +96,7 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction<P

             @Override
             public void onFailure(Exception e) {
-                logger.debug(new ParameterizedMessage("failed to put template [{}]", request.name()), e);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put template [{}]", request.name()), e);
                 listener.onFailure(e);
             }
         });
TransportUpgradeSettingsAction.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.admin.indices.upgrade.post;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -80,7 +81,7 @@ public class TransportUpgradeSettingsAction extends TransportMasterNodeAction<Up

             @Override
             public void onFailure(Exception t) {
-                logger.debug(new ParameterizedMessage("failed to upgrade minimum compatibility version settings on indices [{}]", request.versions().keySet()), t);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to upgrade minimum compatibility version settings on indices [{}]", request.versions().keySet()), t);
                 listener.onFailure(t);
             }
         });
BulkRequestHandler.java
@@ -20,6 +20,7 @@ package org.elasticsearch.action.bulk;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.common.logging.Loggers;
@@ -77,12 +78,12 @@ abstract class BulkRequestHandler {
                 listener.afterBulk(executionId, bulkRequest, bulkResponse);
             } catch (InterruptedException e) {
                 Thread.currentThread().interrupt();
-                logger.info(new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e);
+                logger.info((Supplier<?>) () -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e);
                 if (!afterCalled) {
                     listener.afterBulk(executionId, bulkRequest, e);
                 }
             } catch (Exception e) {
-                logger.warn(new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e);
                 if (!afterCalled) {
                     listener.afterBulk(executionId, bulkRequest, e);
                 }
@@ -143,10 +144,10 @@ abstract class BulkRequestHandler {
                 bulkRequestSetupSuccessful = true;
             } catch (InterruptedException e) {
                 Thread.currentThread().interrupt();
-                logger.info(new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e);
+                logger.info((Supplier<?>) () -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e);
                 listener.afterBulk(executionId, bulkRequest, e);
             } catch (Exception e) {
-                logger.warn(new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e);
                 listener.afterBulk(executionId, bulkRequest, e);
             } finally {
                 if (!bulkRequestSetupSuccessful && acquired) {  // if we fail on client.bulk() release the semaphore
TransportShardBulkAction.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.bulk;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionRequest;
@@ -184,9 +185,9 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ

     private <ReplicationRequestT extends ReplicationRequest<ReplicationRequestT>> void logFailure(Throwable t, String operation, ShardId shardId, ReplicationRequest<ReplicationRequestT> request) {
         if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) {
-            logger.trace(new ParameterizedMessage("{} failed to execute bulk item ({}) {}", shardId, operation, request), t);
+            logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", shardId, operation, request), t);
         } else {
-            logger.debug(new ParameterizedMessage("{} failed to execute bulk item ({}) {}", shardId, operation, request), t);
+            logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", shardId, operation, request), t);
         }
     }
TransportShardMultiGetAction.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.get;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.TransportActions;
@@ -93,7 +94,7 @@ public class TransportShardMultiGetAction extends TransportSingleShardAction<Mul
                 if (TransportActions.isShardNotAvailableException(e)) {
                     throw (ElasticsearchException) e;
                 } else {
-                    logger.debug(new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId, item.type(), item.id()), e);
+                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId, item.type(), item.id()), e);
                     response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), item.type(), item.id(), e));
                 }
             }
IngestActionFilter.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.ingest;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionResponse;
@@ -91,7 +92,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
     void processIndexRequest(Task task, String action, ActionListener listener, ActionFilterChain chain, IndexRequest indexRequest) {

         executionService.executeIndexRequest(indexRequest, t -> {
-            logger.error(new ParameterizedMessage("failed to execute pipeline [{}]", indexRequest.getPipeline()), t);
+            logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to execute pipeline [{}]", indexRequest.getPipeline()), t);
             listener.onFailure(t);
         }, success -> {
             // TransportIndexAction uses IndexRequest and same action name on the node that receives the request and the node that
@@ -106,7 +107,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
         long ingestStartTimeInNanos = System.nanoTime();
         BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original);
         executionService.executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> {
-            logger.debug(new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]", indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()), exception);
+            logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]", indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()), exception);
             bulkRequestModifier.markCurrentItemAsFailed(exception);
         }, (exception) -> {
             if (exception != null) {
AbstractSearchAsyncAction.java
@@ -22,8 +22,8 @@ package org.elasticsearch.action.search;

 import com.carrotsearch.hppc.IntArrayList;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.TopDocs;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.NoShardAvailableActionException;
 import org.elasticsearch.action.support.TransportActions;
@@ -47,7 +47,6 @@ import org.elasticsearch.search.internal.InternalSearchResponse;
 import org.elasticsearch.search.internal.ShardSearchTransportRequest;
 import org.elasticsearch.search.query.QuerySearchResult;
 import org.elasticsearch.search.query.QuerySearchResultProvider;
-import org.elasticsearch.search.suggest.Suggest;
 import org.elasticsearch.threadpool.ThreadPool;

 import java.util.List;
@@ -193,7 +192,10 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
         } catch (Exception e) {
             if (logger.isDebugEnabled()) {
                 logger.debug(
-                    new ParameterizedMessage("{}: Failed to execute [{}] while moving to second phase", shardIt.shardId(), request),
+                    (Supplier<?>) () -> new ParameterizedMessage(
+                        "{}: Failed to execute [{}] while moving to second phase",
+                        shardIt.shardId(),
+                        request),
                     e);
             }
             raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
@@ -215,20 +217,20 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
         if (logger.isDebugEnabled()) {
             if (e != null && !TransportActions.isShardNotAvailableException(e)) {
                 logger.debug(
-                    new ParameterizedMessage(
+                    (Supplier<?>) () -> new ParameterizedMessage(
                         "{}: Failed to execute [{}]",
                         shard != null ? shard.shortSummary() :
                             shardIt.shardId(),
                         request),
                     e);
             } else if (logger.isTraceEnabled()) {
-                logger.trace(new ParameterizedMessage("{}: Failed to execute [{}]", shard, request), e);
+                logger.trace((Supplier<?>) () -> new ParameterizedMessage("{}: Failed to execute [{}]", shard, request), e);
             }
         }
         final ShardSearchFailure[] shardSearchFailures = buildShardFailures();
         if (successfulOps.get() == 0) {
             if (logger.isDebugEnabled()) {
-                logger.debug(new ParameterizedMessage("All shards failed for phase: [{}]", firstPhaseName()), e);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("All shards failed for phase: [{}]", firstPhaseName()), e);
             }

             // no successful ops, raise an exception
@@ -246,12 +248,12 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
             final boolean lastShard = nextShard == null;
             // trace log this exception
             logger.trace(
-                () -> new ParameterizedMessage(
-                    "{}: Failed to execute [{}] lastShard [{}]",
-                    shard != null ? shard.shortSummary() : shardIt.shardId(),
-                    request,
-                    lastShard),
-                e);
+                (Supplier<?>) () -> new ParameterizedMessage(
+                    "{}: Failed to execute [{}] lastShard [{}]",
+                    shard != null ? shard.shortSummary() : shardIt.shardId(),
+                    request,
+                    lastShard),
+                e);
             if (!lastShard) {
                 try {
                     performFirstPhase(shardIndex, shardIt, nextShard);
@@ -264,7 +266,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
                 if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
                     if (e != null && !TransportActions.isShardNotAvailableException(e)) {
                         logger.debug(
-                            new ParameterizedMessage(
+                            (Supplier<?>) () -> new ParameterizedMessage(
                                 "{}: Failed to execute [{}] lastShard [{}]",
                                 shard != null ? shard.shortSummary() :
                                     shardIt.shardId(),
SearchDfsQueryAndFetchAsyncAction.java
@@ -21,6 +21,7 @@ package org.elasticsearch.action.search;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionRunnable;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@@ -106,7 +107,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea
     void onSecondPhaseFailure(Exception e, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult,
                               AtomicInteger counter) {
         if (logger.isDebugEnabled()) {
-            logger.debug(new ParameterizedMessage("[{}] Failed to execute query phase", querySearchRequest.id()), e);
+            logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", querySearchRequest.id()), e);
         }
         this.addShardFailure(shardIndex, dfsResult.shardTarget(), e);
         successfulOps.decrementAndGet();
SearchDfsQueryThenFetchAsyncAction.java
@@ -22,6 +22,7 @@ package org.elasticsearch.action.search;

 import com.carrotsearch.hppc.IntArrayList;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.search.ScoreDoc;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionRunnable;
@@ -114,7 +115,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
     void onQueryFailure(Exception e, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult,
                         AtomicInteger counter) {
         if (logger.isDebugEnabled()) {
-            logger.debug(new ParameterizedMessage("[{}] Failed to execute query phase", querySearchRequest.id()), e);
+            logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", querySearchRequest.id()), e);
         }
         this.addShardFailure(shardIndex, dfsResult.shardTarget(), e);
         successfulOps.decrementAndGet();
@@ -183,7 +184,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
     void onFetchFailure(Exception e, ShardFetchSearchRequest fetchSearchRequest, int shardIndex,
                         SearchShardTarget shardTarget, AtomicInteger counter) {
         if (logger.isDebugEnabled()) {
-            logger.debug(new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e);
+            logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e);
         }
         this.addShardFailure(shardIndex, shardTarget, e);
         successfulOps.decrementAndGet();
SearchQueryThenFetchAsyncAction.java
@@ -22,6 +22,7 @@ package org.elasticsearch.action.search;

 import com.carrotsearch.hppc.IntArrayList;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.search.ScoreDoc;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionRunnable;
@@ -116,7 +117,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
     void onFetchFailure(Exception e, ShardFetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget,
                         AtomicInteger counter) {
         if (logger.isDebugEnabled()) {
-            logger.debug(new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e);
+            logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e);
         }
         this.addShardFailure(shardIndex, shardTarget, e);
         successfulOps.decrementAndGet();
SearchScrollQueryAndFetchAsyncAction.java
@@ -21,6 +21,7 @@ package org.elasticsearch.action.search;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.search.ScoreDoc;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -147,7 +148,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {

     private void onPhaseFailure(Exception e, long searchId, int shardIndex) {
         if (logger.isDebugEnabled()) {
-            logger.debug(new ParameterizedMessage("[{}] Failed to execute query phase", searchId), e);
+            logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", searchId), e);
         }
         addShardFailure(shardIndex, new ShardSearchFailure(e));
         successfulOps.decrementAndGet();
SearchScrollQueryThenFetchAsyncAction.java
@@ -22,6 +22,7 @@ package org.elasticsearch.action.search;

 import com.carrotsearch.hppc.IntArrayList;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.search.ScoreDoc;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -147,7 +148,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {

     void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Exception failure) {
         if (logger.isDebugEnabled()) {
-            logger.debug(new ParameterizedMessage("[{}] Failed to execute query phase", searchId), failure);
+            logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", searchId), failure);
         }
         addShardFailure(shardIndex, new ShardSearchFailure(failure));
         successfulOps.decrementAndGet();
TransportClearScrollAction.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.search;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.HandledTransportAction;
@@ -145,7 +146,7 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
         }

         void onFailedFreedContext(Throwable e, DiscoveryNode node) {
-            logger.warn(new ParameterizedMessage("Clear SC failed on node[{}]", node), e);
+            logger.warn((Supplier<?>) () -> new ParameterizedMessage("Clear SC failed on node[{}]", node), e);
             if (expectedOps.countDown()) {
                 listener.onResponse(new ClearScrollResponse(false, numberOfFreedSearchContexts.get()));
             } else {
HandledTransportAction.java
@@ -77,7 +77,11 @@ public abstract class HandledTransportAction<Request extends ActionRequest<Reque
                 channel.sendResponse(e);
             } catch (Exception e1) {
                 logger.warn(
-                    new ParameterizedMessage("Failed to send error response for action [{}] and request [{}]", actionName, request),
+                    (org.apache.logging.log4j.util.Supplier<?>)
+                        () -> new ParameterizedMessage(
+                            "Failed to send error response for action [{}] and request [{}]",
+                            actionName,
+                            request),
                     e1);
             }
         }
TransportBroadcastAction.java
@@ -225,7 +225,13 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
             if (e != null) {
                 if (logger.isTraceEnabled()) {
                     if (!TransportActions.isShardNotAvailableException(e)) {
-                        logger.trace(new ParameterizedMessage("{}: failed to execute [{}]", shard != null ? shard.shortSummary() : shardIt.shardId(), request), e);
+                        logger.trace(
+                            (org.apache.logging.log4j.util.Supplier<?>)
+                                () -> new ParameterizedMessage(
+                                    "{}: failed to execute [{}]",
+                                    shard != null ? shard.shortSummary() : shardIt.shardId(),
+                                    request),
+                            e);
                     }
                 }
             }
@@ -234,7 +240,13 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
             if (logger.isDebugEnabled()) {
                 if (e != null) {
                     if (!TransportActions.isShardNotAvailableException(e)) {
-                        logger.debug(new ParameterizedMessage("{}: failed to execute [{}]", shard != null ? shard.shortSummary() : shardIt.shardId(), request), e);
+                        logger.debug(
+                            (org.apache.logging.log4j.util.Supplier<?>)
+                                () -> new ParameterizedMessage(
+                                    "{}: failed to execute [{}]",
+                                    shard != null ? shard.shortSummary() : shardIt.shardId(),
+                                    request),
+                            e);
                     }
                 }
             }
TransportBroadcastByNodeAction.java
@@ -364,7 +364,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
         protected void onNodeFailure(DiscoveryNode node, int nodeIndex, Throwable t) {
             String nodeId = node.getId();
             if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
-                logger.debug(new ParameterizedMessage("failed to execute [{}] on node [{}]", actionName, nodeId), t);
+                logger.debug(
+                    (org.apache.logging.log4j.util.Supplier<?>)
+                        () -> new ParameterizedMessage("failed to execute [{}] on node [{}]", actionName, nodeId), t);
             }

             // this is defensive to protect against the possibility of double invocation
@@ -442,11 +444,23 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
                 shardResults[shardIndex] = failure;
                 if (TransportActions.isShardNotAvailableException(e)) {
                     if (logger.isTraceEnabled()) {
-                        logger.trace(new ParameterizedMessage("[{}] failed to execute operation for shard [{}]", actionName, shardRouting.shortSummary()), e);
+                        logger.trace(
+                            (org.apache.logging.log4j.util.Supplier<?>)
+                                () -> new ParameterizedMessage(
+                                    "[{}] failed to execute operation for shard [{}]",
+                                    actionName,
+                                    shardRouting.shortSummary()),
+                            e);
                     }
                 } else {
                     if (logger.isDebugEnabled()) {
-                        logger.debug(new ParameterizedMessage("[{}] failed to execute operation for shard [{}]", actionName, shardRouting.shortSummary()), e);
+                        logger.debug(
+                            (org.apache.logging.log4j.util.Supplier<?>)
+                                () -> new ParameterizedMessage(
+                                    "[{}] failed to execute operation for shard [{}]",
+                                    actionName,
+                                    shardRouting.shortSummary()),
+                            e);
                     }
                 }
             }
TransportMasterNodeAction.java
@@ -156,7 +156,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
             public void onFailure(Exception t) {
                 if (t instanceof Discovery.FailedToCommitClusterStateException
                         || (t instanceof NotMasterException)) {
-                    logger.debug(new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t);
+                    logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t);
                     retry(t, MasterNodeChangePredicate.INSTANCE);
                 } else {
                     listener.onFailure(t);
@@ -210,7 +210,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques

                     @Override
                     public void onTimeout(TimeValue timeout) {
-                        logger.debug(new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), failure);
+                        logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), failure);
                         listener.onFailure(new MasterNotDiscoveredException(failure));
                     }
                 }, changePredicate
TransportNodesAction.java
@@ -239,7 +239,9 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest

         private void onFailure(int idx, String nodeId, Throwable t) {
             if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
-                logger.debug(new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
+                logger.debug(
+                    (org.apache.logging.log4j.util.Supplier<?>)
+                        () -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
             }
             if (accumulateExceptions()) {
                 responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));
ReplicationOperation.java
@@ -191,7 +191,7 @@ public class ReplicationOperation<
             @Override
             public void onFailure(Exception replicaException) {
                 logger.trace(
-                    new ParameterizedMessage(
+                    (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
                         "[{}] failure while performing [{}] on replica {}, request [{}]",
                         shard.shardId(),
                         opType,
@@ -205,7 +205,9 @@ public class ReplicationOperation<
                 shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure(
                     shard.shardId(), shard.currentNodeId(), replicaException, restStatus, false));
                 String message = String.format(Locale.ROOT, "failed to perform %s on replica %s", opType, shard);
-                logger.warn(new ParameterizedMessage("[{}] {}", shard.shardId(), message), replicaException);
+                logger.warn(
+                    (org.apache.logging.log4j.util.Supplier<?>)
+                        () -> new ParameterizedMessage("[{}] {}", shard.shardId(), message), replicaException);
                 replicasProxy.failShard(shard, replicaRequest.primaryTerm(), message, replicaException,
                     ReplicationOperation.this::decPendingAndFinishIfNeeded,
                     ReplicationOperation.this::onPrimaryDemoted,
TransportReplicationAction.java
@@ -216,7 +216,9 @@ public abstract class TransportReplicationAction<
                 channel.sendResponse(e);
             } catch (Exception inner) {
                 inner.addSuppressed(e);
-                logger.warn(new ParameterizedMessage("Failed to send response for {}", actionName), inner);
+                logger.warn(
+                    (org.apache.logging.log4j.util.Supplier<?>)
+                        () -> new ParameterizedMessage("Failed to send response for {}", actionName), inner);
             }
         }
     });
@@ -446,7 +448,11 @@ public abstract class TransportReplicationAction<
         public void onFailure(Exception e) {
             if (e instanceof RetryOnReplicaException) {
                 logger.trace(
-                    new ParameterizedMessage("Retrying operation on replica, action [{}], request [{}]", transportReplicaAction, request),
+                    (org.apache.logging.log4j.util.Supplier<?>)
+                        () -> new ParameterizedMessage(
+                            "Retrying operation on replica, action [{}], request [{}]",
+                            transportReplicaAction,
+                            request),
                     e);
                 final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
                 observer.waitForNextChange(new ClusterStateObserver.Listener() {
@@ -483,7 +489,10 @@ public abstract class TransportReplicationAction<
             } catch (IOException responseException) {
                 responseException.addSuppressed(e);
                 logger.warn(
-                    new ParameterizedMessage("failed to send error message back to client for action [{}]", transportReplicaAction),
+                    (org.apache.logging.log4j.util.Supplier<?>)
+                        () -> new ParameterizedMessage(
+                            "failed to send error message back to client for action [{}]",
+                            transportReplicaAction),
                     responseException);
             }
         }
@@ -688,7 +697,7 @@ public abstract class TransportReplicationAction<
             if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException ||
                 (isPrimaryAction && retryPrimaryException(cause))) {
                 logger.trace(
-                    new ParameterizedMessage(
+                    (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
                         "received an error from node [{}] for request [{}], scheduling a retry",
                         node.getId(),
                         request),
@@ -738,7 +747,9 @@ public abstract class TransportReplicationAction<
         void finishAsFailed(Exception failure) {
             if (finished.compareAndSet(false, true)) {
                 setPhase(task, "failed");
-                logger.trace(new ParameterizedMessage("operation failed. action [{}], request [{}]", actionName, request), failure);
+                logger.trace(
+                    (org.apache.logging.log4j.util.Supplier<?>)
+                        () -> new ParameterizedMessage("operation failed. action [{}], request [{}]", actionName, request), failure);
                 listener.onFailure(failure);
             } else {
                 assert false : "finishAsFailed called but operation is already finished";
@@ -747,7 +758,11 @@ public abstract class TransportReplicationAction<

         void finishWithUnexpectedFailure(Exception failure) {
             logger.warn(
-                new ParameterizedMessage("unexpected error during the primary phase for action [{}], request [{}]", actionName, request),
+                (org.apache.logging.log4j.util.Supplier<?>)
+                    () -> new ParameterizedMessage(
+                        "unexpected error during the primary phase for action [{}], request [{}]",
+                        actionName,
+                        request),
                 failure);
             if (finished.compareAndSet(false, true)) {
                 setPhase(task, "failed");
TransportSingleShardAction.java
@@ -188,7 +188,9 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ

         private void onFailure(ShardRouting shardRouting, Exception e) {
             if (logger.isTraceEnabled() && e != null) {
-                logger.trace(new ParameterizedMessage("{}: failed to execute [{}]", shardRouting, internalRequest.request()), e);
+                logger.trace(
+                    (org.apache.logging.log4j.util.Supplier<?>)
+                        () -> new ParameterizedMessage("{}: failed to execute [{}]", shardRouting, internalRequest.request()), e);
             }
             perform(e);
         }
@@ -206,7 +208,9 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
                 failure = new NoShardAvailableActionException(null, LoggerMessageFormat.format("No shard available for [{}]", internalRequest.request()), failure);
             } else {
                 if (logger.isDebugEnabled()) {
-                    logger.debug(new ParameterizedMessage("{}: failed to execute [{}]", null, internalRequest.request()), failure);
+                    logger.debug(
+                        (org.apache.logging.log4j.util.Supplier<?>)
+                            () -> new ParameterizedMessage("{}: failed to execute [{}]", null, internalRequest.request()), failure);
                 }
             }
             listener.onFailure(failure);
TransportTasksAction.java
@@ -276,7 +276,9 @@ public abstract class TransportTasksAction<

         private void onFailure(int idx, String nodeId, Throwable t) {
             if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
-                logger.debug(new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
+                logger.debug(
+                    (org.apache.logging.log4j.util.Supplier<?>)
+                        () -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
             }
             if (accumulateExceptions()) {
                 responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));
TransportShardMultiTermsVectorAction.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.termvectors;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.TransportActions;
@@ -88,7 +89,7 @@ public class TransportShardMultiTermsVectorAction extends TransportSingleShardAc
                 if (TransportActions.isShardNotAvailableException(t)) {
                     throw (ElasticsearchException) t;
                 } else {
-                    logger.debug(new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), t);
+                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), t);
                     response.add(request.locations.get(i),
                             new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(), termVectorsRequest.id(), t));
                 }
BootstrapCheck.java
@@ -21,6 +21,7 @@ package org.elasticsearch.bootstrap;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.Constants;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.io.PathUtils;
@@ -28,7 +29,6 @@ import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.BoundTransportAddress;
 import org.elasticsearch.common.transport.TransportAddress;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
 import org.elasticsearch.monitor.jvm.JvmInfo;
 import org.elasticsearch.monitor.process.ProcessProbe;
 import org.elasticsearch.node.Node;
@@ -426,11 +426,15 @@ final class BootstrapCheck {
                 try {
                     return parseProcSysVmMaxMapCount(rawProcSysVmMaxMapCount);
                 } catch (final NumberFormatException e) {
-                    logger.warn(new ParameterizedMessage("unable to parse vm.max_map_count [{}]", rawProcSysVmMaxMapCount), e);
+                    logger.warn(
+                        (Supplier<?>) () -> new ParameterizedMessage(
+                            "unable to parse vm.max_map_count [{}]",
+                            rawProcSysVmMaxMapCount),
+                        e);
                 }
             }
         } catch (final IOException e) {
-            logger.warn(new ParameterizedMessage("I/O exception while trying to read [{}]", path), e);
+            logger.warn((Supplier<?>) () -> new ParameterizedMessage("I/O exception while trying to read [{}]", path), e);
         }
         return -1;
     }
ElasticsearchUncaughtExceptionHandler.java
@@ -78,13 +78,16 @@ class ElasticsearchUncaughtExceptionHandler implements Thread.UncaughtExceptionH
     // visible for testing
     void onFatalUncaught(final String threadName, final Throwable t) {
         final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
-        logger.error(new ParameterizedMessage("fatal error in thread [{}], exiting", threadName), t);
+        logger.error(
+            (org.apache.logging.log4j.util.Supplier<?>)
+                () -> new ParameterizedMessage("fatal error in thread [{}], exiting", threadName), t);
     }

     // visible for testing
     void onNonFatalUncaught(final String threadName, final Throwable t) {
         final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
-        logger.warn(new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t);
+        logger.warn((org.apache.logging.log4j.util.Supplier<?>)
+            () -> new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t);
     }

     // visible for testing
TransportClientNodesService.java
@@ -21,6 +21,7 @@ package org.elasticsearch.client.transport;

 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
@@ -339,7 +340,7 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
                     transportService.connectToNode(node);
                 } catch (Exception e) {
                     it.remove();
-                    logger.debug(new ParameterizedMessage("failed to connect to discovered node [{}]", node), e);
+                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to connect to discovered node [{}]", node), e);
                 }
             }
         }
@@ -376,7 +377,9 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
                         logger.trace("connecting to listed node (light) [{}]", listedNode);
                         transportService.connectToNodeLight(listedNode);
                     } catch (Exception e) {
-                        logger.debug(new ParameterizedMessage("failed to connect to node [{}], removed from nodes list", listedNode), e);
+                        logger.debug(
+                            (Supplier<?>)
+                                () -> new ParameterizedMessage("failed to connect to node [{}], removed from nodes list", listedNode), e);
                         newFilteredNodes.add(listedNode);
                         continue;
                     }
@@ -408,7 +411,8 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
                         newNodes.add(listedNode);
                     }
                 } catch (Exception e) {
-                    logger.info(new ParameterizedMessage("failed to get node info for {}, disconnecting...", listedNode), e);
+                    logger.info(
+                        (Supplier<?>) () -> new ParameterizedMessage("failed to get node info for {}, disconnecting...", listedNode), e);
                     transportService.disconnectFromNode(listedNode);
                 }
             }
@@ -452,7 +456,9 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
                             transportService.connectToNodeLight(listedNode);
                         }
                     } catch (Exception e) {
-                        logger.debug(new ParameterizedMessage("failed to connect to node [{}], ignoring...", listedNode), e);
+                        logger.debug(
+                            (Supplier<?>)
+                                () -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", listedNode), e);
                         latch.countDown();
                         return;
                     }
@@ -482,17 +488,16 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
                             @Override
                             public void handleException(TransportException e) {
                                 logger.info(
-                                    new ParameterizedMessage(
-                                        "failed to get local cluster state for {}, disconnecting...",
-                                        listedNode),
-                                    e);
+                                    (Supplier<?>) () -> new ParameterizedMessage(
+                                        "failed to get local cluster state for {}, disconnecting...", listedNode), e);
                                 transportService.disconnectFromNode(listedNode);
                                 latch.countDown();
                             }
                         });
                 } catch (Exception e) {
                     logger.info(
-                        new ParameterizedMessage("failed to get local cluster state info for {}, disconnecting...", listedNode), e);
+                        (Supplier<?>)() -> new ParameterizedMessage(
+                            "failed to get local cluster state info for {}, disconnecting...", listedNode), e);
                     transportService.disconnectFromNode(listedNode);
                     latch.countDown();
                 }
NodeConnectionsService.java
@@ -19,6 +19,7 @@
 package org.elasticsearch.cluster;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.inject.Inject;
@@ -92,7 +93,7 @@ public class NodeConnectionsService extends AbstractLifecycleComponent {
                 try {
                     transportService.disconnectFromNode(node);
                 } catch (Exception e) {
-                    logger.warn(new ParameterizedMessage("failed to disconnect to node [{}]", node), e);
+                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to disconnect to node [{}]", node), e);
                 }
             }
         }
@@ -114,7 +115,11 @@ public class NodeConnectionsService extends AbstractLifecycleComponent {
                 nodeFailureCount = nodeFailureCount + 1;
                 // log every 6th failure
                 if ((nodeFailureCount % 6) == 1) {
-                    logger.warn(new ParameterizedMessage("failed to connect to node {} (tried [{}] times)", node, nodeFailureCount), e);
+                    final int finalNodeFailureCount = nodeFailureCount;
+                    logger.warn(
+                        (Supplier<?>)
+                            () -> new ParameterizedMessage(
+                                "failed to connect to node {} (tried [{}] times)", node, finalNodeFailureCount), e);
                 }
                 nodes.put(node, nodeFailureCount);
             }
ShardStateAction.java
@@ -21,6 +21,7 @@ package org.elasticsearch.cluster.action.shard;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.cluster.ClusterChangedEvent;
@@ -109,7 +110,7 @@ public class ShardStateAction extends AbstractComponent {
                 if (isMasterChannelException(exp)) {
                     waitForNewMasterAndRetry(actionName, observer, shardEntry, listener);
                 } else {
-                    logger.warn(new ParameterizedMessage("{} unexpected failure while sending request [{}] to [{}] for shard entry [{}]", shardEntry.shardId, actionName, masterNode, shardEntry), exp);
+                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} unexpected failure while sending request [{}] to [{}] for shard entry [{}]", shardEntry.shardId, actionName, masterNode, shardEntry), exp);
                     listener.onFailure(exp instanceof RemoteTransportException ? (Exception) (exp.getCause() instanceof Exception ? exp.getCause() : new ElasticsearchException(exp.getCause())) : exp);
                 }
             }
@@ -170,7 +171,7 @@ public class ShardStateAction extends AbstractComponent {

             @Override
             public void onClusterServiceClose() {
-                logger.warn(new ParameterizedMessage("{} node closed while execution action [{}] for shard entry [{}]", shardEntry.shardId, actionName, shardEntry), shardEntry.failure);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} node closed while execution action [{}] for shard entry [{}]", shardEntry.shardId, actionName, shardEntry), shardEntry.failure);
                 listener.onFailure(new NodeClosedException(clusterService.localNode()));
             }

@@ -195,7 +196,7 @@ public class ShardStateAction extends AbstractComponent {

         @Override
         public void messageReceived(ShardEntry request, TransportChannel channel) throws Exception {
-            logger.warn(new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure);
+            logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure);
             clusterService.submitStateUpdateTask(
                 "shard-failed",
                 request,
@@ -204,12 +205,12 @@ public class ShardStateAction extends AbstractComponent {
                 new ClusterStateTaskListener() {
                     @Override
                     public void onFailure(String source, Exception e) {
-                        logger.error(new ParameterizedMessage("{} unexpected failure while failing shard [{}]", request.shardId, request), e);
+                        logger.error((Supplier<?>) () -> new ParameterizedMessage("{} unexpected failure while failing shard [{}]", request.shardId, request), e);
                         try {
                             channel.sendResponse(e);
                         } catch (Exception channelException) {
                             channelException.addSuppressed(e);
-                            logger.warn(new ParameterizedMessage("{} failed to send failure [{}] while failing shard [{}]", request.shardId, e, request), channelException);
+                            logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send failure [{}] while failing shard [{}]", request.shardId, e, request), channelException);
                         }
                     }

@@ -219,7 +220,7 @@ public class ShardStateAction extends AbstractComponent {
                         try {
                             channel.sendResponse(new NotMasterException(source));
                         } catch (Exception channelException) {
-                            logger.warn(new ParameterizedMessage("{} failed to send no longer master while failing shard [{}]", request.shardId, request), channelException);
+                            logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send no longer master while failing shard [{}]", request.shardId, request), channelException);
                         }
                     }

@@ -228,7 +229,7 @@ public class ShardStateAction extends AbstractComponent {
                         try {
                             channel.sendResponse(TransportResponse.Empty.INSTANCE);
                         } catch (Exception channelException) {
-                            logger.warn(new ParameterizedMessage("{} failed to send response while failing shard [{}]", request.shardId, request), channelException);
+                            logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send response while failing shard [{}]", request.shardId, request), channelException);
                        }
                     }
                 }
@@ -316,7 +317,7 @@ public class ShardStateAction extends AbstractComponent {
             }
             batchResultBuilder.successes(tasksToBeApplied);
         } catch (Exception e) {
-            logger.warn(new ParameterizedMessage("failed to apply failed shards {}", shardRoutingsToBeApplied), e);
+            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to apply failed shards {}", shardRoutingsToBeApplied), e);
             // failures are communicated back to the requester
             // cluster state will not be updated in this case
             batchResultBuilder.failures(tasksToBeApplied, e);
@@ -432,7 +433,7 @@ public class ShardStateAction extends AbstractComponent {
             }
             builder.successes(tasksToBeApplied);
         } catch (Exception e) {
-            logger.warn(new ParameterizedMessage("failed to apply started shards {}", shardRoutingsToBeApplied), e);
+            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to apply started shards {}", shardRoutingsToBeApplied), e);
             builder.failures(tasksToBeApplied, e);
         }

@@ -441,7 +442,7 @@ public class ShardStateAction extends AbstractComponent {

         @Override
         public void onFailure(String source, Exception e) {
-            logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e);
+            logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
        }
     }
MetaDataCreateIndexService.java
@@ -22,6 +22,7 @@ package org.elasticsearch.cluster.metadata;

 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
@@ -447,9 +448,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
             @Override
             public void onFailure(String source, Exception e) {
                 if (e instanceof IndexAlreadyExistsException) {
-                    logger.trace(new ParameterizedMessage("[{}] failed to create", request.index()), e);
+                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
                 } else {
-                    logger.debug(new ParameterizedMessage("[{}] failed to create", request.index()), e);
+                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
                 }
                 super.onFailure(source, e);
             }
MetaDataMappingService.java
@@ -21,6 +21,7 @@ package org.elasticsearch.cluster.metadata;

 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;
 import org.elasticsearch.cluster.AckedClusterStateTaskListener;
@@ -194,7 +195,7 @@ public class MetaDataMappingService extends AbstractComponent {
                     }
                 }
             } catch (Exception e) {
-                logger.warn(new ParameterizedMessage("[{}] failed to refresh-mapping in cluster state", index), e);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to refresh-mapping in cluster state", index), e);
             }
             return dirty;
         }
@@ -208,7 +209,7 @@ public class MetaDataMappingService extends AbstractComponent {
             refreshTask,
             ClusterStateTaskConfig.build(Priority.HIGH),
             refreshExecutor,
-            (source, e) -> logger.warn(new ParameterizedMessage("failure during [{}]", source), e)
+            (source, e) -> logger.warn((Supplier<?>) () -> new ParameterizedMessage("failure during [{}]", source), e)
         );
     }
@@ -20,6 +20,7 @@
 package org.elasticsearch.cluster.routing;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateUpdateTask;
@@ -114,16 +115,16 @@ public class RoutingService extends AbstractLifecycleComponent {
                     rerouting.set(false);
                     ClusterState state = clusterService.state();
                     if (logger.isTraceEnabled()) {
-                        logger.error(new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state.prettyPrint()), e);
+                        logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state.prettyPrint()), e);
                     } else {
-                        logger.error(new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e);
+                        logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e);
                     }
                 }
             });
         } catch (Exception e) {
             rerouting.set(false);
             ClusterState state = clusterService.state();
-            logger.warn(new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state.prettyPrint()), e);
+            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state.prettyPrint()), e);
         }
     }
 }
@@ -21,6 +21,7 @@ package org.elasticsearch.cluster.service;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.cluster.AckedClusterStateTaskListener;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterName;
@@ -556,7 +557,7 @@ public class ClusterService extends AbstractLifecycleComponent {
             TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
             if (logger.isTraceEnabled()) {
                 logger.trace(
-                    new ParameterizedMessage(
+                    (Supplier<?>) () -> new ParameterizedMessage(
                         "failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}",
                         executionTime,
                         previousClusterState.version(),
@@ -595,7 +596,9 @@ public class ClusterService extends AbstractLifecycleComponent {
             executionResult.handle(
                 () -> proccessedListeners.add(updateTask),
                 ex -> {
-                    logger.debug(new ParameterizedMessage("cluster state update task {} failed", updateTask.toString(executor)), ex);
+                    logger.debug(
+                        (Supplier<?>)
+                            () -> new ParameterizedMessage("cluster state update task {} failed", updateTask.toString(executor)), ex);
                     updateTask.listener.onFailure(updateTask.source, ex);
                 }
             );
@@ -678,11 +681,10 @@ public class ClusterService extends AbstractLifecycleComponent {
         try {
             clusterStatePublisher.accept(clusterChangedEvent, ackListener);
         } catch (Discovery.FailedToCommitClusterStateException t) {
+            final long version = newClusterState.version();
             logger.warn(
-                new ParameterizedMessage(
-                    "failing [{}]: failed to commit cluster state version [{}]",
-                    tasksSummary,
-                    newClusterState.version()),
+                (Supplier<?>) () -> new ParameterizedMessage(
+                    "failing [{}]: failed to commit cluster state version [{}]", tasksSummary, version),
                 t);
             proccessedListeners.forEach(task -> task.listener.onFailure(task.source, t));
             return;
@@ -726,8 +728,9 @@ public class ClusterService extends AbstractLifecycleComponent {
             try {
                 ackListener.onNodeAck(newClusterState.nodes().getLocalNode(), null);
             } catch (Exception e) {
+                final DiscoveryNode localNode = newClusterState.nodes().getLocalNode();
                 logger.debug(
-                    new ParameterizedMessage("error while processing ack for master node [{}]", newClusterState.nodes().getLocalNode()),
+                    (Supplier<?>) () -> new ParameterizedMessage("error while processing ack for master node [{}]", localNode),
                     e);
             }
         }
@@ -740,7 +743,7 @@ public class ClusterService extends AbstractLifecycleComponent {
             executor.clusterStatePublished(clusterChangedEvent);
         } catch (Exception e) {
             logger.error(
-                new ParameterizedMessage(
+                (Supplier<?>) () -> new ParameterizedMessage(
                     "exception thrown while notifying executor of new cluster state publication [{}]",
                     tasksSummary),
                 e);
@@ -752,14 +755,17 @@ public class ClusterService extends AbstractLifecycleComponent {
             warnAboutSlowTaskIfNeeded(executionTime, tasksSummary);
         } catch (Exception e) {
             TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
+            final long version = newClusterState.version();
+            final String stateUUID = newClusterState.stateUUID();
+            final String prettyPrint = newClusterState.prettyPrint();
             logger.warn(
-                new ParameterizedMessage(
+                (Supplier<?>) () -> new ParameterizedMessage(
                     "failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}",
                     executionTime,
-                    newClusterState.version(),
-                    newClusterState.stateUUID(),
+                    version,
+                    stateUUID,
                     tasksSummary,
-                    newClusterState.prettyPrint()),
+                    prettyPrint),
                 e);
             // TODO: do we want to call updateTask.onFailure here?
         }
@@ -792,7 +798,9 @@ public class ClusterService extends AbstractLifecycleComponent {
                 listener.onFailure(source, e);
             } catch (Exception inner) {
                 inner.addSuppressed(e);
-                logger.error(new ParameterizedMessage("exception thrown by listener notifying of failure from [{}]", source), inner);
+                logger.error(
+                    (Supplier<?>) () -> new ParameterizedMessage(
+                        "exception thrown by listener notifying of failure from [{}]", source), inner);
             }
         }

@@ -802,8 +810,8 @@ public class ClusterService extends AbstractLifecycleComponent {
                 listener.onNoLongerMaster(source);
             } catch (Exception e) {
                 logger.error(
-                    new ParameterizedMessage("exception thrown by listener while notifying no longer master from [{}]", source),
-                    e);
+                    (Supplier<?>) () -> new ParameterizedMessage(
+                        "exception thrown by listener while notifying no longer master from [{}]", source), e);
             }
         }

@@ -813,7 +821,7 @@ public class ClusterService extends AbstractLifecycleComponent {
                 listener.clusterStateProcessed(source, oldState, newState);
             } catch (Exception e) {
                 logger.error(
-                    new ParameterizedMessage(
+                    (Supplier<?>) () -> new ParameterizedMessage(
                         "exception thrown by listener while notifying of cluster state processed from [{}], old cluster state:\n" +
                             "{}\nnew cluster state:\n{}",
                         source,
@@ -1070,7 +1078,8 @@ public class ClusterService extends AbstractLifecycleComponent {
             } else {
                 this.lastFailure = e;
                 logger.debug(
-                    new ParameterizedMessage("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion),
+                    (Supplier<?>) () -> new ParameterizedMessage(
+                        "ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion),
                     e);
             }
@@ -21,6 +21,7 @@ package org.elasticsearch.common.lucene;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.analysis.core.KeywordAnalyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.codecs.CodecUtil;
@@ -112,7 +113,7 @@ public class Lucene {
         try {
             return Version.parse(version);
         } catch (ParseException e) {
-            logger.warn(new ParameterizedMessage("no version match {}, default to {}", version, defaultVersion), e);
+            logger.warn((Supplier<?>) () -> new ParameterizedMessage("no version match {}, default to {}", version, defaultVersion), e);
             return defaultVersion;
         }
     }
@@ -20,6 +20,7 @@
 package org.elasticsearch.common.settings;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.search.spell.LevensteinDistance;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.ExceptionsHelper;
@@ -128,7 +129,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
                 settingUpdater.getValue(current, previous);
             } catch (RuntimeException ex) {
                 exceptions.add(ex);
-                logger.debug(new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex);
             }
         }
         // here we are exhaustive and record all settings that failed.
@@ -156,7 +157,8 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
             try {
                 applyRunnables.add(settingUpdater.updater(current, previous));
             } catch (Exception ex) {
-                logger.warn(new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex);
+                logger.warn(
+                    (Supplier<?>) () -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex);
                 throw ex;
             }
         }
@@ -521,7 +523,8 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
             } catch (IllegalArgumentException ex) {
                 changed = true;
                 logger.warn(
-                    new ParameterizedMessage("found invalid setting: {} value: {} - archiving", entry.getKey(), entry.getValue()), ex);
+                    (Supplier<?>) () -> new ParameterizedMessage(
+                        "found invalid setting: {} value: {} - archiving", entry.getKey(), entry.getValue()), ex);
                 /*
                  * We put them back in here such that tools can check from the outside if there are any indices with broken settings. The
                  * setting can remain there but we want users to be aware that some of their setting are broken and they can research why
@@ -21,6 +21,7 @@ package org.elasticsearch.common.util.concurrent;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;

 public class LoggingRunnable implements Runnable {

@@ -37,7 +38,7 @@ public class LoggingRunnable implements Runnable {
         try {
             runnable.run();
         } catch (Exception e) {
-            logger.warn(new ParameterizedMessage("failed to execute [{}]", runnable.toString()), e);
+            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to execute [{}]", runnable.toString()), e);
         }
     }
@@ -20,6 +20,7 @@ package org.elasticsearch.discovery;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.logging.ESLoggerFactory;

@@ -69,7 +70,7 @@ public class AckClusterStatePublishResponseHandler extends BlockingClusterStateP
             ackListener.onNodeAck(node, e);
         } catch (Exception inner) {
             inner.addSuppressed(e);
-            logger.debug(new ParameterizedMessage("error while processing ack for node [{}]", node), inner);
+            logger.debug((Supplier<?>) () -> new ParameterizedMessage("error while processing ack for node [{}]", node), inner);
         }
     }
 }
@@ -20,6 +20,7 @@
 package org.elasticsearch.discovery.local;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
@@ -145,7 +146,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov

                 @Override
                 public void onFailure(String source, Exception e) {
-                    logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e);
+                    logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
                 }
             });
         } else if (firstMaster != null) {
@@ -174,7 +175,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov

                 @Override
                 public void onFailure(String source, Exception e) {
-                    logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e);
+                    logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
                 }

             });
@@ -239,7 +240,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov

             @Override
             public void onFailure(String source, Exception e) {
-                logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e);
+                logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
             }
         });
     }
@@ -330,7 +331,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov
                     newNodeSpecificClusterState = discovery.lastProcessedClusterState.readDiffFrom(StreamInput.wrap(clusterStateDiffBytes)).apply(discovery.lastProcessedClusterState);
                     logger.trace("sending diff cluster state version [{}] with size {} to [{}]", clusterState.version(), clusterStateDiffBytes.length, discovery.localNode().getName());
                 } catch (IncompatibleClusterStateVersionException ex) {
-                    logger.warn(new ParameterizedMessage("incompatible cluster state version [{}] - resending complete cluster state", clusterState.version()), ex);
+                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("incompatible cluster state version [{}] - resending complete cluster state", clusterState.version()), ex);
                 }
             }
             if (newNodeSpecificClusterState == null) {
@@ -381,7 +382,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov

                 @Override
                 public void onFailure(String source, Exception e) {
-                    logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e);
+                    logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
                     publishResponseHandler.onFailure(discovery.localNode(), e);
                 }
@@ -20,6 +20,7 @@ package org.elasticsearch.discovery.zen;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterChangedEvent;
@@ -366,7 +367,7 @@ public class NodeJoinController extends AbstractComponent {
                 try {
                     callback.onFailure(e);
                 } catch (Exception inner) {
-                    logger.error(new ParameterizedMessage("error handling task failure [{}]", e), inner);
+                    logger.error((Supplier<?>) () -> new ParameterizedMessage("error handling task failure [{}]", e), inner);
                 }
             }
         }
@@ -377,7 +378,7 @@ public class NodeJoinController extends AbstractComponent {
                 try {
                     callback.onSuccess();
                 } catch (Exception e) {
-                    logger.error(new ParameterizedMessage("unexpected error during [{}]", source), e);
+                    logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected error during [{}]", source), e);
                 }
             }
         }
@@ -21,6 +21,7 @@ package org.elasticsearch.discovery.zen;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;
@@ -260,7 +261,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
             try {
                 membership.sendLeaveRequestBlocking(nodes.getMasterNode(), nodes.getLocalNode(), TimeValue.timeValueSeconds(1));
             } catch (Exception e) {
-                logger.debug(new ParameterizedMessage("failed to send leave request to master [{}]", nodes.getMasterNode()), e);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to send leave request to master [{}]", nodes.getMasterNode()), e);
             }
         } else {
             // we're master -> let other potential master we left and start a master election now rather then wait for masterFD
@@ -272,7 +273,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
                 try {
                     membership.sendLeaveRequest(nodes.getLocalNode(), possibleMaster);
                 } catch (Exception e) {
-                    logger.debug(new ParameterizedMessage("failed to send leave request from master [{}] to possible master [{}]", nodes.getMasterNode(), possibleMaster), e);
+                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to send leave request from master [{}] to possible master [{}]", nodes.getMasterNode(), possibleMaster), e);
                 }
             }
         }
@@ -331,7 +332,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover

             @Override
             public void onFailure(String source, Exception e) {
-                logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e);
+                logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
             }

         });
@@ -468,7 +469,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
             // first, make sure we can connect to the master
             transportService.connectToNode(masterNode);
         } catch (Exception e) {
-            logger.warn(new ParameterizedMessage("failed to connect to master [{}], retrying...", masterNode), e);
+            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to connect to master [{}], retrying...", masterNode), e);
             return false;
         }
         int joinAttempt = 0; // we retry on illegal state if the master is not yet ready
@@ -488,7 +489,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
                 }
             } else {
                 if (logger.isTraceEnabled()) {
-                    logger.trace(new ParameterizedMessage("failed to send join request to master [{}]", masterNode), e);
+                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to send join request to master [{}]", masterNode), e);
                 } else {
                     logger.info("failed to send join request to master [{}], reason [{}]", masterNode, ExceptionsHelper.detailedMessage(e));
                 }
@@ -586,7 +587,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover

             @Override
             public void onFailure(final String source, final Exception e) {
-                logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e);
+                logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
             }

             @Override
@@ -658,7 +659,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover

             @Override
             public void onFailure(String source, Exception e) {
-                logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e);
+                logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
             }

             @Override
@@ -678,7 +679,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
             return;
         }

-        logger.info(new ParameterizedMessage("master_left [{}], reason [{}]", masterNode, reason), cause);
+        logger.info((Supplier<?>) () -> new ParameterizedMessage("master_left [{}], reason [{}]", masterNode, reason), cause);

         clusterService.submitStateUpdateTask("master_failed (" + masterNode + ")", new ClusterStateUpdateTask(Priority.IMMEDIATE) {

@@ -707,7 +708,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover

             @Override
             public void onFailure(String source, Exception e) {
-                logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e);
+                logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
             }

             @Override
@@ -792,13 +793,13 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover

             @Override
             public void onFailure(String source, Exception e) {
-                logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e);
+                logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
                 if (newClusterState != null) {
                     try {
                         publishClusterState.pendingStatesQueue().markAsFailed(newClusterState, e);
                     } catch (Exception inner) {
                         inner.addSuppressed(e);
-                        logger.error(new ParameterizedMessage("unexpected exception while failing [{}]", source), inner);
+                        logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected exception while failing [{}]", source), inner);
                     }
                 }
             }
@@ -881,7 +882,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
         try {
             membership.sendValidateJoinRequestBlocking(node, state, joinTimeout);
         } catch (Exception e) {
-            logger.warn(new ParameterizedMessage("failed to validate incoming join request from node [{}]", node), e);
+            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to validate incoming join request from node [{}]", node), e);
             callback.onFailure(new IllegalStateException("failure when sending a validation request to node", e));
             return;
         }
@@ -1035,11 +1036,11 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover

                 @Override
                 public void handleException(TransportException exp) {
-                    logger.warn(new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), exp);
+                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), exp);
                 }
             });
         } catch (Exception e) {
-            logger.warn(new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), e);
+            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), e);
         }
         return localClusterState;
     }
@@ -1158,7 +1159,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover

             @Override
             public void onFailure(String source, Exception e) {
-                logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e);
+                logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
             }
         });
     }
@@ -20,6 +20,7 @@
 package org.elasticsearch.discovery.zen.fd;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
@@ -285,7 +286,7 @@ public class MasterFaultDetection extends FaultDetection {

                     int retryCount = ++MasterFaultDetection.this.retryCount;
                     logger.trace(
-                        new ParameterizedMessage(
+                        (Supplier<?>) () -> new ParameterizedMessage(
                             "[master] failed to ping [{}], retry [{}] out of [{}]",
                             masterNode,
                             retryCount,
@@ -20,6 +20,7 @@
 package org.elasticsearch.discovery.zen.fd;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -168,7 +169,7 @@ public class NodesFaultDetection extends FaultDetection {
             });
         } catch (EsRejectedExecutionException ex) {
             logger.trace(
-                new ParameterizedMessage(
+                (Supplier<?>) () -> new ParameterizedMessage(
                     "[node  ] [{}] ignoring node failure (reason [{}]). Local node is shutting down",
                     node,
                     reason),
@@ -238,7 +239,7 @@ public class NodesFaultDetection extends FaultDetection {

                 retryCount++;
                 logger.trace(
-                    new ParameterizedMessage(
+                    (Supplier<?>) () -> new ParameterizedMessage(
                         "[node  ] failed to ping [{}], retry [{}] out of [{}]",
                         node,
                         retryCount,
@@ -21,6 +21,7 @@ package org.elasticsearch.discovery.zen.ping.unicast;

 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
@@ -415,17 +416,17 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
                     } catch (ConnectTransportException e) {
                         // can't connect to the node - this is a more common path!
                         logger.trace(
-                            new ParameterizedMessage("[{}] failed to connect to {}", sendPingsHandler.id(), finalNodeToSend), e);
+                            (Supplier<?>) () -> new ParameterizedMessage(
+                                "[{}] failed to connect to {}", sendPingsHandler.id(), finalNodeToSend), e);
                     } catch (RemoteTransportException e) {
                         // something went wrong on the other side
                         logger.debug(
-                            new ParameterizedMessage(
-                                "[{}] received a remote error as a response to ping {}",
-                                sendPingsHandler.id(),
-                                finalNodeToSend),
-                            e);
+                            (Supplier<?>) () -> new ParameterizedMessage(
+                                "[{}] received a remote error as a response to ping {}", sendPingsHandler.id(), finalNodeToSend), e);
                     } catch (Exception e) {
-                        logger.warn(new ParameterizedMessage("[{}] failed send ping to {}", sendPingsHandler.id(), finalNodeToSend), e);
+                        logger.warn(
+                            (Supplier<?>) () -> new ParameterizedMessage(
+                                "[{}] failed send ping to {}", sendPingsHandler.id(), finalNodeToSend), e);
                     } finally {
                         if (!success) {
                             latch.countDown();
@@ -492,9 +493,9 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
                 latch.countDown();
                 if (exp instanceof ConnectTransportException) {
                     // ok, not connected...
-                    logger.trace(new ParameterizedMessage("failed to connect to {}", nodeToSend), exp);
+                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to connect to {}", nodeToSend), exp);
                 } else {
-                    logger.warn(new ParameterizedMessage("failed to send ping to [{}]", node), exp);
+                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to send ping to [{}]", node), exp);
                 }
             }
         });
@@ -245,7 +245,8 @@ public class PublishClusterStateAction extends AbstractComponent {
                 bytes = serializeFullClusterState(clusterState, node.getVersion());
                 serializedStates.put(node.getVersion(), bytes);
             } catch (Exception e) {
-                logger.warn(new ParameterizedMessage("failed to serialize cluster_state before publishing it to node {}", node), e);
+                logger.warn(
+                    (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("failed to serialize cluster_state before publishing it to node {}", node), e);
                 sendingController.onNodeSendFailed(node, e);
                 return;
             }
@@ -291,13 +292,14 @@ public class PublishClusterStateAction extends AbstractComponent {
                         logger.debug("resending full cluster state to node {} reason {}", node, exp.getDetailedMessage());
                         sendFullClusterState(clusterState, serializedStates, node, publishTimeout, sendingController);
                     } else {
-                        logger.debug(new ParameterizedMessage("failed to send cluster state to {}", node), exp);
+                        logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("failed to send cluster state to {}", node), exp);
                         sendingController.onNodeSendFailed(node, exp);
                     }
                 }
             });
         } catch (Exception e) {
-            logger.warn(new ParameterizedMessage("error sending cluster state to {}", node), e);
+            logger.warn(
+                (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("error sending cluster state to {}", node), e);
             sendingController.onNodeSendFailed(node, e);
         }
     }
@@ -323,12 +325,12 @@ public class PublishClusterStateAction extends AbstractComponent {

                 @Override
                 public void handleException(TransportException exp) {
-                    logger.debug(new ParameterizedMessage("failed to commit cluster state (uuid [{}], version [{}]) to {}", clusterState.stateUUID(), clusterState.version(), node), exp);
+                    logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("failed to commit cluster state (uuid [{}], version [{}]) to {}", clusterState.stateUUID(), clusterState.version(), node), exp);
                     sendingController.getPublishResponseHandler().onFailure(node, exp);
                 }
             });
         } catch (Exception t) {
-            logger.warn(new ParameterizedMessage("error sending cluster state commit (uuid [{}], version [{}]) to {}", clusterState.stateUUID(), clusterState.version(), node), t);
+            logger.warn((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("error sending cluster state commit (uuid [{}], version [{}]) to {}", clusterState.stateUUID(), clusterState.version(), node), t);
             sendingController.getPublishResponseHandler().onFailure(node, t);
         }
     }
@@ -627,7 +629,7 @@ public class PublishClusterStateAction extends AbstractComponent {
             if (committedOrFailed()) {
                 return committed == false;
             }
-            logger.trace(new ParameterizedMessage("failed to commit version [{}]. {}", clusterState.version(), details), reason);
+            logger.trace((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("failed to commit version [{}]. {}", clusterState.version(), details), reason);
             committed = false;
             committedOrFailedLatch.countDown();
             return true;
@@ -21,6 +21,7 @@ package org.elasticsearch.env;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.SegmentInfos;
 import org.apache.lucene.store.Directory;
@@ -232,7 +233,8 @@ public final class NodeEnvironment implements Closeable {
                     }

                 } catch (IOException e) {
-                    startupTraceLogger.trace(new ParameterizedMessage("failed to obtain node lock on {}", dir.toAbsolutePath()), e);
+                    startupTraceLogger.trace(
+                        (Supplier<?>) () -> new ParameterizedMessage("failed to obtain node lock on {}", dir.toAbsolutePath()), e);
                     lastException = new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e);
                     // release all the ones that were obtained up until now
                     releaseAndNullLocks(locks);
@@ -885,7 +887,7 @@ public final class NodeEnvironment implements Closeable {
                 logger.trace("releasing lock [{}]", lock);
                 lock.close();
             } catch (IOException e) {
-                logger.trace(new ParameterizedMessage("failed to release lock [{}]", lock), e);
+                logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to release lock [{}]", lock), e);
             }
         }
     }
@@ -21,6 +21,7 @@ package org.elasticsearch.gateway;

 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchTimeoutException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
@@ -201,7 +202,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
             if (unwrappedCause instanceof EsRejectedExecutionException || unwrappedCause instanceof ReceiveTimeoutTransportException || unwrappedCause instanceof ElasticsearchTimeoutException) {
                 nodeEntry.restartFetching();
             } else {
-                logger.warn(new ParameterizedMessage("{}: failed to list shard for {} on node [{}]", shardId, type, failure.nodeId()), failure);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("{}: failed to list shard for {} on node [{}]", shardId, type, failure.nodeId()), failure);
                 nodeEntry.doneFetching(failure.getCause());
             }
         }
@@ -40,9 +40,6 @@ import org.elasticsearch.indices.IndicesService;
 import java.util.Arrays;
-import java.util.function.Supplier;

 /**
  *
  */
 public class Gateway extends AbstractComponent implements ClusterStateListener {

     private final ClusterService clusterService;
@@ -139,9 +136,10 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
                 indicesService.verifyIndexMetadata(nodeServicesProvider, electedIndexMetaData, electedIndexMetaData);
             }
         } catch (Exception e) {
+            final Index electedIndex = electedIndexMetaData.getIndex();
             logger.warn(
-                new ParameterizedMessage("recovering index {} failed - recovering as closed", electedIndexMetaData.getIndex()),
-                e);
+                (org.apache.logging.log4j.util.Supplier<?>)
+                    () -> new ParameterizedMessage("recovering index {} failed - recovering as closed", electedIndex), e);
             electedIndexMetaData = IndexMetaData.builder(electedIndexMetaData).state(IndexMetaData.State.CLOSE).build();
         }
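A note on the fully qualified casts in the Gateway and PublishClusterStateAction hunks above: those call sites cast to (org.apache.logging.log4j.util.Supplier<?>) instead of importing the type, presumably because another Supplier is already in scope (the Gateway hunk appears to drop a java.util.function.Supplier import in this same commit). A minimal sketch of that variant, assuming a clashing import (names illustrative):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;

    import java.util.function.Supplier; // a clashing Supplier already in scope

    public class QualifiedSupplierSketch {
        private static final Logger logger = LogManager.getLogger(QualifiedSupplierSketch.class);

        public static void main(String[] args) {
            Supplier<String> node = () -> "node-1"; // the java.util.function kind
            Exception e = new RuntimeException("boom");
            // The fully qualified cast selects Log4j's Supplier overload without an import clash.
            logger.warn(
                (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
                    "error sending cluster state to {}", node.get()), e);
        }
    }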
@@ -21,6 +21,7 @@ package org.elasticsearch.gateway;

 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateListener;
@@ -290,7 +291,7 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste

         @Override
         public void onFailure(String source, Exception e) {
-            logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e);
+            logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
             GatewayRecoveryListener.this.onFailure("failed to updated cluster state");
         }
@@ -20,6 +20,7 @@
 package org.elasticsearch.gateway;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateUpdateTask;
 import org.elasticsearch.cluster.block.ClusterBlocks;
@@ -149,7 +150,7 @@ public class LocalAllocateDangledIndices extends AbstractComponent {
                         upgradedIndexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData);
                     } catch (Exception ex) {
                         // upgrade failed - adding index as closed
-                        logger.warn(new ParameterizedMessage("found dangled index [{}] on node [{}]. This index cannot be upgraded to the latest version, adding as closed", indexMetaData.getIndex(), request.fromNode), ex);
+                        logger.warn((Supplier<?>) () -> new ParameterizedMessage("found dangled index [{}] on node [{}]. This index cannot be upgraded to the latest version, adding as closed", indexMetaData.getIndex(), request.fromNode), ex);
                         upgradedIndexMetaData = IndexMetaData.builder(indexMetaData).state(IndexMetaData.State.CLOSE).version(indexMetaData.getVersion() + 1).build();
                     }
                     metaData.put(upgradedIndexMetaData, false);
@@ -176,7 +177,7 @@ public class LocalAllocateDangledIndices extends AbstractComponent {

                 @Override
                 public void onFailure(String source, Exception e) {
-                    logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e);
+                    logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
                     try {
                         channel.sendResponse(e);
                     } catch (Exception inner) {
@@ -20,6 +20,7 @@ package org.elasticsearch.gateway;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexFormatTooNewException;
@@ -324,7 +325,8 @@ public abstract class MetaDataStateFormat<T> {
             } catch (Exception e) {
                 exceptions.add(new IOException("failed to read " + pathAndStateId.toString(), e));
                 logger.debug(
-                    new ParameterizedMessage("{}: failed to read [{}], ignoring...", pathAndStateId.file.toAbsolutePath(), prefix), e);
+                    (Supplier<?>) () -> new ParameterizedMessage(
+                        "{}: failed to read [{}], ignoring...", pathAndStateId.file.toAbsolutePath(), prefix), e);
             }
         }
         // if we reach this something went wrong
@@ -20,11 +20,11 @@
 package org.elasticsearch.gateway;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.index.Index;
@@ -128,7 +128,7 @@ public class MetaStateService extends AbstractComponent {
             IndexMetaData.FORMAT.write(indexMetaData,
                 nodeEnv.indexPaths(indexMetaData.getIndex()));
         } catch (Exception ex) {
-            logger.warn(new ParameterizedMessage("[{}]: failed to write index state", index), ex);
+            logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}]: failed to write index state", index), ex);
             throw new IOException("failed to write state for [" + index + "]", ex);
         }
     }
@@ -20,6 +20,7 @@
 package org.elasticsearch.gateway;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -227,7 +228,8 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
                     logger.trace("[{}] on node [{}] has no allocation id, out-dated shard (shard state version: [{}])", shard, nodeShardState.getNode(), nodeShardState.legacyVersion());
                 }
             } else {
-                logger.trace(new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", shard, nodeShardState.getNode(), allocationId), nodeShardState.storeException());
+                final String finalAllocationId = allocationId;
+                logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException());
                 allocationId = null;
             }

@@ -351,8 +353,9 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
                     logger.trace("[{}] on node [{}] has allocation id [{}]", shard, nodeShardState.getNode(), nodeShardState.allocationId());
                 }
             } else {
+                final long finalVerison = version;
                 // when there is an store exception, we disregard the reported version and assign it as no version (same as shard does not exist)
-                logger.trace(new ParameterizedMessage("[{}] on node [{}] has version [{}] but the store can not be opened, treating no version", shard, nodeShardState.getNode(), version), nodeShardState.storeException());
+                logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] on node [{}] has version [{}] but the store can not be opened, treating no version", shard, nodeShardState.getNode(), finalVerison), nodeShardState.storeException());
                 version = ShardStateMetaData.NO_VERSION;
             }
@@ -20,6 +20,7 @@
 package org.elasticsearch.gateway;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.FailedNodeException;
@@ -141,12 +142,13 @@ public class TransportNodesListGatewayStartedShards extends
                 }
                 Store.tryOpenIndex(shardPath.resolveIndex(), shardId, nodeEnv::shardLock, logger);
             } catch (Exception exception) {
+                final ShardPath finalShardPath = shardPath;
                 logger.trace(
-                    new ParameterizedMessage(
+                    (Supplier<?>) () -> new ParameterizedMessage(
                         "{} can't open index for shard [{}] in path [{}]",
                         shardId,
                         shardStateMetaData,
-                        (shardPath != null) ? shardPath.resolveIndex() : ""),
+                        (finalShardPath != null) ? finalShardPath.resolveIndex() : ""),
                     exception);
                 String allocationId = shardStateMetaData.allocationId != null ?
                     shardStateMetaData.allocationId.getId() : null;
@@ -21,6 +21,7 @@ package org.elasticsearch.index;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.logging.Loggers;
@@ -59,7 +60,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
             try {
                 listener.shardRoutingChanged(indexShard, oldRouting, newRouting);
             } catch (Exception e) {
-                logger.warn(new ParameterizedMessage("[{}] failed to invoke shard touring changed callback", indexShard.shardId().getId()), e);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke shard touring changed callback", indexShard.shardId().getId()), e);
             }
         }
     }
@@ -70,7 +71,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
             try {
                 listener.afterIndexShardCreated(indexShard);
             } catch (Exception e) {
-                logger.warn(new ParameterizedMessage("[{}] failed to invoke after shard created callback", indexShard.shardId().getId()), e);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke after shard created callback", indexShard.shardId().getId()), e);
                 throw e;
             }
         }
@@ -82,7 +83,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
             try {
                 listener.afterIndexShardStarted(indexShard);
             } catch (Exception e) {
-                logger.warn(new ParameterizedMessage("[{}] failed to invoke after shard started callback", indexShard.shardId().getId()), e);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke after shard started callback", indexShard.shardId().getId()), e);
                 throw e;
             }
         }
@@ -95,7 +96,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
             try {
                 listener.beforeIndexShardClosed(shardId, indexShard, indexSettings);
             } catch (Exception e) {
-                logger.warn(new ParameterizedMessage("[{}] failed to invoke before shard closed callback", shardId.getId()), e);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke before shard closed callback", shardId.getId()), e);
                 throw e;
             }
         }
@@ -108,7 +109,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
             try {
                 listener.afterIndexShardClosed(shardId, indexShard, indexSettings);
             } catch (Exception e) {
-                logger.warn(new ParameterizedMessage("[{}] failed to invoke after shard closed callback", shardId.getId()), e);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke after shard closed callback", shardId.getId()), e);
                 throw e;
             }
         }
@@ -120,7 +121,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
             try {
                 listener.onShardInactive(indexShard);
             } catch (Exception e) {
-                logger.warn(new ParameterizedMessage("[{}] failed to invoke on shard inactive callback", indexShard.shardId().getId()), e);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke on shard inactive callback", indexShard.shardId().getId()), e);
                 throw e;
             }
         }
@@ -132,7 +133,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
             try {
                 listener.indexShardStateChanged(indexShard, previousState, indexShard.state(), reason);
             } catch (Exception e) {
-                logger.warn(new ParameterizedMessage("[{}] failed to invoke index shard state changed callback", indexShard.shardId().getId()), e);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke index shard state changed callback", indexShard.shardId().getId()), e);
                 throw e;
             }
         }
@@ -168,7 +169,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
             try {
                 listener.beforeIndexShardCreated(shardId, indexSettings);
             } catch (Exception e) {
-                logger.warn(new ParameterizedMessage("[{}] failed to invoke before shard created callback", shardId), e);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke before shard created callback", shardId), e);
                 throw e;
             }
         }
@@ -229,7 +230,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
             try {
                 listener.beforeIndexShardDeleted(shardId, indexSettings);
             } catch (Exception e) {
-                logger.warn(new ParameterizedMessage("[{}] failed to invoke before shard deleted callback", shardId.getId()), e);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke before shard deleted callback", shardId.getId()), e);
                 throw e;
             }
         }
@@ -242,7 +243,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
             try {
                 listener.afterIndexShardDeleted(shardId, indexSettings);
             } catch (Exception e) {
-                logger.warn(new ParameterizedMessage("[{}] failed to invoke after shard deleted callback", shardId.getId()), e);
+                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke after shard deleted callback", shardId.getId()), e);
                 throw e;
             }
         }
@@ -20,6 +20,7 @@
 package org.elasticsearch.index;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
@@ -395,7 +396,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
                 final boolean flushEngine = deleted.get() == false && closed.get();
                 indexShard.close(reason, flushEngine);
             } catch (Exception e) {
-                logger.debug(new ParameterizedMessage("[{}] failed to close index shard", shardId), e);
+                logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to close index shard", shardId), e);
                 // ignore
             }
         }
@@ -406,7 +407,9 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
             try {
                 store.close();
             } catch (Exception e) {
-                logger.warn(new ParameterizedMessage("[{}] failed to close store on shard removal (reason: [{}])", shardId, reason), e);
+                logger.warn(
+                    (Supplier<?>) () -> new ParameterizedMessage(
+                        "[{}] failed to close store on shard removal (reason: [{}])", shardId, reason), e);
             }
         }
     }
@@ -426,7 +429,8 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
             } catch (IOException e) {
                 shardStoreDeleter.addPendingDelete(lock.getShardId(), indexSettings);
                 logger.debug(
-                    new ParameterizedMessage("[{}] failed to delete shard content - scheduled a retry", lock.getShardId().id()), e);
+                    (Supplier<?>) () -> new ParameterizedMessage(
+                        "[{}] failed to delete shard content - scheduled a retry", lock.getShardId().id()), e);
             }
         }
     }
@@ -638,7 +642,9 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
             try {
                 shard.onSettingsChanged();
             } catch (Exception e) {
-                logger.warn(new ParameterizedMessage("[{}] failed to notify shard about setting change", shard.shardId().id()), e);
+                logger.warn(
+                    (Supplier<?>) () -> new ParameterizedMessage(
+                        "[{}] failed to notify shard about setting change", shard.shardId().id()), e);
             }
         }
         if (refreshTask.getInterval().equals(indexSettings.getRefreshInterval()) == false) {
@@ -781,7 +787,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
                 if (lastThrownException == null || sameException(lastThrownException, ex) == false) {
                     // prevent the annoying fact of logging the same stuff all the time with an interval of 1 sec will spam all your logs
                     indexService.logger.warn(
-                        new ParameterizedMessage(
+                        (Supplier<?>) () -> new ParameterizedMessage(
                             "failed to run task {} - suppressing re-occurring exceptions unless the exception changes",
                             toString()),
                         ex);
@@ -20,6 +20,7 @@
 package org.elasticsearch.index;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.index.DirectoryReader;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.settings.Settings;
@@ -144,14 +145,18 @@ public final class IndexWarmer extends AbstractComponent {
                     }

                     if (indexShard.warmerService().logger().isTraceEnabled()) {
-                        indexShard.warmerService().logger().trace("warmed global ordinals for [{}], took [{}]", fieldType.name(),
+                        indexShard.warmerService().logger().trace(
+                            "warmed global ordinals for [{}], took [{}]",
+                            fieldType.name(),
                             TimeValue.timeValueNanos(System.nanoTime() - start));
                     }
                 } catch (Exception e) {
                     indexShard
                         .warmerService()
                         .logger()
-                        .warn(new ParameterizedMessage("failed to warm-up global ordinals for [{}]", fieldType.name()), e);
+                        .warn(
+                            (Supplier<?>) () -> new ParameterizedMessage(
+                                "failed to warm-up global ordinals for [{}]", fieldType.name()), e);
                 } finally {
                     latch.countDown();
                 }
@@ -20,6 +20,7 @@
 package org.elasticsearch.index.cache.bitset;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
@@ -259,7 +260,7 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
                         indexShard.warmerService().logger().trace("warmed bitset for [{}], took [{}]", filterToWarm, TimeValue.timeValueNanos(System.nanoTime() - start));
                     }
                 } catch (Exception e) {
-                    indexShard.warmerService().logger().warn(new ParameterizedMessage("failed to load bitset for [{}]", filterToWarm), e);
+                    indexShard.warmerService().logger().warn((Supplier<?>) () -> new ParameterizedMessage("failed to load bitset for [{}]", filterToWarm), e);
                 } finally {
                     latch.countDown();
                 }
@ -21,6 +21,7 @@ package org.elasticsearch.index.engine;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.IndexCommit;
@ -365,7 +366,7 @@ public abstract class Engine implements Closeable {
throw ex;
} catch (Exception ex) {
ensureOpen(); // throw EngineCloseException here if we are already closed
logger.error(new ParameterizedMessage("failed to acquire searcher, source {}", source), ex);
logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to acquire searcher, source {}", source), ex);
throw new EngineException(shardId, "failed to acquire searcher, source " + source, ex);
} finally {
if (!success) { // release the ref in the case of an error...
@ -444,7 +445,7 @@ public abstract class Engine implements Closeable {
try {
directory = engineConfig.getCodec().compoundFormat().getCompoundReader(segmentReader.directory(), segmentCommitInfo.info, IOContext.READ);
} catch (IOException e) {
logger.warn(new ParameterizedMessage("Error when opening compound reader for Directory [{}] and SegmentCommitInfo [{}]", segmentReader.directory(), segmentCommitInfo), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("Error when opening compound reader for Directory [{}] and SegmentCommitInfo [{}]", segmentReader.directory(), segmentCommitInfo), e);

return ImmutableOpenMap.of();
}
@ -459,14 +460,16 @@ public abstract class Engine implements Closeable {
try {
files = directory.listAll();
} catch (IOException e) {
logger.warn(new ParameterizedMessage("Couldn't list Compound Reader Directory [{}]", directory), e);
final Directory finalDirectory = directory;
logger.warn(
(Supplier<?>) () -> new ParameterizedMessage("Couldn't list Compound Reader Directory [{}]", finalDirectory), e);
return ImmutableOpenMap.of();
}
} else {
try {
files = segmentReader.getSegmentInfo().files().toArray(new String[]{});
} catch (IOException e) {
logger.warn(new ParameterizedMessage("Couldn't list Directory from SegmentReader [{}] and SegmentInfo [{}]", segmentReader, segmentReader.getSegmentInfo()), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("Couldn't list Directory from SegmentReader [{}] and SegmentInfo [{}]", segmentReader, segmentReader.getSegmentInfo()), e);
return ImmutableOpenMap.of();
}
}
@ -480,7 +483,10 @@ public abstract class Engine implements Closeable {
} catch (NoSuchFileException | FileNotFoundException e) {
logger.warn("Tried to query fileLength but file is gone [{}] [{}]", e, directory, file);
} catch (IOException e) {
logger.warn(new ParameterizedMessage("Error when trying to query fileLength [{}] [{}]", directory, file), e);
final Directory finalDirectory = directory;
logger.warn(
(Supplier<?>)
() -> new ParameterizedMessage("Error when trying to query fileLength [{}] [{}]", finalDirectory, file), e);
}
if (length == 0L) {
continue;
@ -492,7 +498,10 @@ public abstract class Engine implements Closeable {
try {
directory.close();
} catch (IOException e) {
logger.warn(new ParameterizedMessage("Error when closing compound reader on Directory [{}]", directory), e);
final Directory finalDirectory = directory;
logger.warn(
(Supplier<?>)
() -> new ParameterizedMessage("Error when closing compound reader on Directory [{}]", finalDirectory), e);
}
}

@ -527,7 +536,7 @@ public abstract class Engine implements Closeable {
try {
segment.sizeInBytes = info.sizeInBytes();
} catch (IOException e) {
logger.trace(new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
}
final SegmentReader segmentReader = segmentReader(reader.reader());
segment.memoryInBytes = segmentReader.ramBytesUsed();
@ -557,7 +566,7 @@ public abstract class Engine implements Closeable {
try {
segment.sizeInBytes = info.sizeInBytes();
} catch (IOException e) {
logger.trace(new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
}
segments.put(info.info.name, segment);
} else {
@ -669,10 +678,10 @@ public abstract class Engine implements Closeable {
closeNoLock("engine failed on: [" + reason + "]");
} finally {
if (failedEngine != null) {
logger.debug(new ParameterizedMessage("tried to fail engine but engine is already failed. ignoring. [{}]", reason), failure);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("tried to fail engine but engine is already failed. ignoring. [{}]", reason), failure);
return;
}
logger.warn(new ParameterizedMessage("failed engine [{}]", reason), failure);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed engine [{}]", reason), failure);
// we must set a failure exception, generate one if not supplied
failedEngine = (failure != null) ? failure : new IllegalStateException(reason);
// we first mark the store as corrupted before we notify any listeners
@ -696,7 +705,7 @@ public abstract class Engine implements Closeable {
store.decRef();
}
} else {
logger.debug(new ParameterizedMessage("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", reason), failure);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", reason), failure);
}
}

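The `finalDirectory` copies introduced in the Engine hunks above are a side effect of the lambda rewrite: Java lambdas may only capture local variables that are final or effectively final, and `directory` is reassigned earlier in those methods. A hedged sketch of the pattern, with a simplified method body standing in for the Engine code:

```java
import java.io.IOException;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.store.Directory;

// Illustrative only; the real method lives in Engine and does more work.
class CaptureSketch {
    static String[] listFiles(Directory primary, Directory fallback, Logger logger) {
        Directory directory = primary;
        if (directory == null) {
            directory = fallback; // reassignment makes `directory` not effectively final
        }
        try {
            return directory.listAll();
        } catch (IOException e) {
            // Copy into a final local so the lambda is allowed to capture it.
            final Directory finalDirectory = directory;
            logger.warn(
                (Supplier<?>) () -> new ParameterizedMessage("Couldn't list Compound Reader Directory [{}]", finalDirectory), e);
            return new String[0];
        }
    }
}
```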
@ -217,7 +217,10 @@ public class MapperService extends AbstractIndexComponent {
}
}
} catch (Exception e) {
logger.warn(new ParameterizedMessage("[{}] failed to add mapping [{}], source [{}]", index(), mappingType, mappingSource), e);
logger.warn(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("[{}] failed to add mapping [{}], source [{}]", index(), mappingType, mappingSource),
e);
throw e;
}
}

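Unlike most files in this commit, the MapperService hunk casts to the fully qualified `org.apache.logging.log4j.util.Supplier` rather than importing it, presumably because another `Supplier` type is already imported in that file. A hypothetical illustration of that collision:

```java
import java.util.function.Supplier; // the unqualified name already refers to this type

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;

// Hypothetical class, shown only to make the name collision concrete.
class QualifiedCastSketch {
    private static final Logger logger = LogManager.getLogger(QualifiedCastSketch.class);
    private final Supplier<String> defaultType = () -> "_doc"; // uses java.util.function.Supplier

    void onMappingFailure(String index, String type, Exception e) {
        // The Log4j Supplier must be written out in full here to avoid
        // colliding with the java.util.function.Supplier imported above.
        logger.warn(
            (org.apache.logging.log4j.util.Supplier<?>)
                () -> new ParameterizedMessage("[{}] failed to add mapping [{}]", index, type),
            e);
    }
}
```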
@ -20,6 +20,7 @@ package org.elasticsearch.index.shard;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.index.engine.Engine;

import java.util.List;
@ -83,7 +84,7 @@ public interface IndexingOperationListener {
try {
listener.preIndex(operation);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("preIndex listener [{}] failed", listener), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("preIndex listener [{}] failed", listener), e);
}
}
return operation;
@ -96,7 +97,7 @@ public interface IndexingOperationListener {
try {
listener.postIndex(index, created);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("postIndex listener [{}] failed", listener), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("postIndex listener [{}] failed", listener), e);
}
}
}
@ -109,7 +110,7 @@ public interface IndexingOperationListener {
listener.postIndex(index, ex);
} catch (Exception inner) {
inner.addSuppressed(ex);
logger.warn(new ParameterizedMessage("postIndex listener [{}] failed", listener), inner);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("postIndex listener [{}] failed", listener), inner);
}
}
}
@ -121,7 +122,7 @@ public interface IndexingOperationListener {
try {
listener.preDelete(delete);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("preDelete listener [{}] failed", listener), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("preDelete listener [{}] failed", listener), e);
}
}
return delete;
@ -134,7 +135,7 @@ public interface IndexingOperationListener {
try {
listener.postDelete(delete);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("postDelete listener [{}] failed", listener), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("postDelete listener [{}] failed", listener), e);
}
}
}
@ -147,7 +148,7 @@ public interface IndexingOperationListener {
listener.postDelete(delete, ex);
} catch (Exception inner) {
inner.addSuppressed(ex);
logger.warn(new ParameterizedMessage("postDelete listener [{}] failed", listener), inner);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("postDelete listener [{}] failed", listener), inner);
}
}
}

@ -20,6 +20,7 @@ package org.elasticsearch.index.shard;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.search.internal.SearchContext;

import java.util.List;
@ -121,7 +122,7 @@ public interface SearchOperationListener {
try {
listener.onPreQueryPhase(searchContext);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("onPreQueryPhase listener [{}] failed", listener), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("onPreQueryPhase listener [{}] failed", listener), e);
}
}
}
@ -132,7 +133,7 @@ public interface SearchOperationListener {
try {
listener.onFailedQueryPhase(searchContext);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("onFailedQueryPhase listener [{}] failed", listener), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("onFailedQueryPhase listener [{}] failed", listener), e);
}
}
}
@ -143,7 +144,7 @@ public interface SearchOperationListener {
try {
listener.onQueryPhase(searchContext, tookInNanos);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("onQueryPhase listener [{}] failed", listener), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("onQueryPhase listener [{}] failed", listener), e);
}
}
}
@ -154,7 +155,7 @@ public interface SearchOperationListener {
try {
listener.onPreFetchPhase(searchContext);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("onPreFetchPhase listener [{}] failed", listener), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("onPreFetchPhase listener [{}] failed", listener), e);
}
}
}
@ -165,7 +166,7 @@ public interface SearchOperationListener {
try {
listener.onFailedFetchPhase(searchContext);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("onFailedFetchPhase listener [{}] failed", listener), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("onFailedFetchPhase listener [{}] failed", listener), e);
}
}
}
@ -176,7 +177,7 @@ public interface SearchOperationListener {
try {
listener.onFetchPhase(searchContext, tookInNanos);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("onFetchPhase listener [{}] failed", listener), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("onFetchPhase listener [{}] failed", listener), e);
}
}
}
@ -187,7 +188,7 @@ public interface SearchOperationListener {
try {
listener.onNewContext(context);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("onNewContext listener [{}] failed", listener), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("onNewContext listener [{}] failed", listener), e);
}
}
}
@ -198,7 +199,7 @@ public interface SearchOperationListener {
try {
listener.onFreeContext(context);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("onFreeContext listener [{}] failed", listener), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("onFreeContext listener [{}] failed", listener), e);
}
}
}
@ -209,7 +210,7 @@ public interface SearchOperationListener {
try {
listener.onNewScrollContext(context);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("onNewScrollContext listener [{}] failed", listener), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("onNewScrollContext listener [{}] failed", listener), e);
}
}
}
@ -220,7 +221,7 @@ public interface SearchOperationListener {
try {
listener.onFreeScrollContext(context);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("onFreeScrollContext listener [{}] failed", listener), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("onFreeScrollContext listener [{}] failed", listener), e);
}
}
}

@ -21,6 +21,7 @@ package org.elasticsearch.index.store;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexCommit;
@ -280,7 +281,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
directory.deleteFile(origFile);
} catch (FileNotFoundException | NoSuchFileException e) {
} catch (Exception ex) {
logger.debug(new ParameterizedMessage("failed to delete file [{}]", origFile), ex);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete file [{}]", origFile), ex);
}
// now, rename the files... and fail it it won't work
directory.rename(tempFile, origFile);
@ -401,7 +402,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
try {
tryOpenIndex(indexLocation, shardId, shardLocker, logger);
} catch (Exception ex) {
logger.trace(new ParameterizedMessage("Can't open index for path [{}]", indexLocation), ex);
logger.trace((Supplier<?>) () -> new ParameterizedMessage("Can't open index for path [{}]", indexLocation), ex);
return false;
}
return true;
@ -606,7 +607,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
// if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit point around?
throw new IllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex);
}
logger.debug(new ParameterizedMessage("failed to delete file [{}]", existingFile), ex);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete file [{}]", existingFile), ex);
// ignore, we don't really care, will get deleted later on
}
}
@ -824,7 +825,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
// Lucene checks the checksum after it tries to lookup the codec etc.
// in that case we might get only IAE or similar exceptions while we are really corrupt...
// TODO we should check the checksum in lucene if we hit an exception
logger.warn(new ParameterizedMessage("failed to build store metadata. checking segment info integrity (with commit [{}])", commit == null ? "no" : "yes"), ex);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to build store metadata. checking segment info integrity (with commit [{}])", commit == null ? "no" : "yes"), ex);
Lucene.checkSegmentInfoIntegrity(directory);
} catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException cex) {
cex.addSuppressed(ex);
@ -859,7 +860,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
}

} catch (Exception ex) {
logger.debug(new ParameterizedMessage("Can retrieve checksum from file [{}]", file), ex);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("Can retrieve checksum from file [{}]", file), ex);
throw ex;
}
builder.put(file, new StoreFileMetaData(file, length, checksum, version, fileHash.get()));

@ -20,6 +20,7 @@
package org.elasticsearch.index.translog;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TwoPhaseCommit;
import org.apache.lucene.store.AlreadyClosedException;
@ -258,7 +259,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
try {
Files.delete(tempFile);
} catch (IOException ex) {
logger.warn(new ParameterizedMessage("failed to delete temp file {}", tempFile), ex);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to delete temp file {}", tempFile), ex);
}
}
}

@ -20,6 +20,7 @@
package org.elasticsearch.indices;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@ -178,7 +179,7 @@ public class IndexingMemoryController extends AbstractComponent implements Index

@Override
public void onFailure(Exception e) {
logger.warn(new ParameterizedMessage("failed to write indexing buffer for shard [{}]; ignoring", shard.shardId()), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to write indexing buffer for shard [{}]; ignoring", shard.shardId()), e);
}
});
}

@ -22,6 +22,7 @@ package org.elasticsearch.indices;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.util.CollectionUtil;
@ -98,8 +99,8 @@ import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.search.internal.SearchContext;
@ -219,7 +220,7 @@ public class IndicesService extends AbstractLifecycleComponent
try {
removeIndex(index, "shutdown", false);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("failed to remove index on stop [{}]", index), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to remove index on stop [{}]", index), e);
} finally {
latch.countDown();
}
@ -297,7 +298,7 @@ public class IndicesService extends AbstractLifecycleComponent
}
} catch (IllegalIndexShardStateException e) {
// we can safely ignore illegal state on ones that are closing for example
logger.trace(new ParameterizedMessage("{} ignoring shard stats", indexShard.shardId()), e);
logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} ignoring shard stats", indexShard.shardId()), e);
}
}
}
@ -475,7 +476,7 @@ public class IndicesService extends AbstractLifecycleComponent
try {
removeIndex(index, reason, false);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("failed to remove index ({})", reason), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to remove index ({})", reason), e);
}
}

@ -566,7 +567,7 @@ public class IndicesService extends AbstractLifecycleComponent
try {
removeIndex(index, reason, true);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("failed to delete index ({})", reason), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to delete index ({})", reason), e);
}
}

@ -586,7 +587,7 @@ public class IndicesService extends AbstractLifecycleComponent
}
deleteIndexStore(reason, metaData, clusterState);
} catch (IOException e) {
logger.warn(new ParameterizedMessage("[{}] failed to delete unassigned index (reason [{}])", metaData.getIndex(), reason), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to delete unassigned index (reason [{}])", metaData.getIndex(), reason), e);
}
}
}
@ -638,9 +639,9 @@ public class IndicesService extends AbstractLifecycleComponent
}
success = true;
} catch (LockObtainFailedException ex) {
logger.debug(new ParameterizedMessage("{} failed to delete index store - at least one shards is still locked", index), ex);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to delete index store - at least one shards is still locked", index), ex);
} catch (Exception ex) {
logger.warn(new ParameterizedMessage("{} failed to delete index", index), ex);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to delete index", index), ex);
} finally {
if (success == false) {
addPendingDelete(index, indexSettings);
@ -747,7 +748,7 @@ public class IndicesService extends AbstractLifecycleComponent
try {
metaData = metaStateService.loadIndexState(index);
} catch (IOException e) {
logger.warn(new ParameterizedMessage("[{}] failed to load state file from a stale deleted index, folders will be left on disk", index), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to load state file from a stale deleted index, folders will be left on disk", index), e);
return null;
}
final IndexSettings indexSettings = buildIndexSettings(metaData);
@ -756,7 +757,7 @@ public class IndicesService extends AbstractLifecycleComponent
} catch (IOException e) {
// we just warn about the exception here because if deleteIndexStoreIfDeletionAllowed
// throws an exception, it gets added to the list of pending deletes to be tried again
logger.warn(new ParameterizedMessage("[{}] failed to delete index on disk", metaData.getIndex()), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to delete index on disk", metaData.getIndex()), e);
}
return metaData;
}
@ -928,7 +929,7 @@ public class IndicesService extends AbstractLifecycleComponent
nodeEnv.deleteIndexDirectoryUnderLock(index, indexSettings);
iterator.remove();
} catch (IOException ex) {
logger.debug(new ParameterizedMessage("{} retry pending delete", index), ex);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} retry pending delete", index), ex);
}
} else {
assert delete.shardId != -1;
@ -938,7 +939,7 @@ public class IndicesService extends AbstractLifecycleComponent
deleteShardStore("pending delete", shardLock, delete.settings);
iterator.remove();
} catch (IOException ex) {
logger.debug(new ParameterizedMessage("{} retry pending delete", shardLock.getShardId()), ex);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} retry pending delete", shardLock.getShardId()), ex);
}
} else {
logger.warn("{} no shard lock for pending delete", delete.shardId);

@ -19,6 +19,7 @@
package org.elasticsearch.indices.analysis;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.analysis.hunspell.Dictionary;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.SimpleFSDirectory;
@ -139,7 +140,9 @@ public class HunspellService extends AbstractComponent {
} catch (Exception e) {
// The cache loader throws unchecked exception (see #loadDictionary()),
// here we simply report the exception and continue loading the dictionaries
logger.error(new ParameterizedMessage("exception while loading dictionary {}", file.getFileName()), e);
logger.error(
(Supplier<?>) () -> new ParameterizedMessage(
"exception while loading dictionary {}", file.getFileName()), e);
}
}
}
@ -197,7 +200,7 @@ public class HunspellService extends AbstractComponent {
}

} catch (Exception e) {
logger.error(new ParameterizedMessage("Could not load hunspell dictionary [{}]", locale), e);
logger.error((Supplier<?>) () -> new ParameterizedMessage("Could not load hunspell dictionary [{}]", locale), e);
throw e;
} finally {
IOUtils.close(affixStream);

@ -21,6 +21,7 @@ package org.elasticsearch.indices.cluster;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.store.LockObtainFailedException;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
@ -62,10 +63,10 @@ import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.indices.recovery.RecoveryFailedException;
import org.elasticsearch.indices.recovery.PeerRecoverySourceService;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.indices.recovery.RecoveryFailedException;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.snapshots.RestoreService;
@ -270,7 +271,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
threadPool.generic().execute(new AbstractRunnable() {
@Override
public void onFailure(Exception e) {
logger.warn(new ParameterizedMessage("[{}] failed to complete pending deletion for index", index), e);
logger.warn(
(Supplier<?>) () -> new ParameterizedMessage("[{}] failed to complete pending deletion for index", index), e);
}

@Override
@ -638,7 +640,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
} catch (Exception inner) {
inner.addSuppressed(failure);
logger.warn(
new ParameterizedMessage(
(Supplier<?>) () -> new ParameterizedMessage(
"[{}][{}] failed to remove shard after failure ([{}])",
shardRouting.getIndexName(),
shardRouting.getId(),
@ -653,13 +655,14 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
private void sendFailShard(ShardRouting shardRouting, String message, @Nullable Exception failure) {
try {
logger.warn(
new ParameterizedMessage("[{}] marking and sending shard failed due to [{}]", shardRouting.shardId(), message), failure);
(Supplier<?>) () -> new ParameterizedMessage(
"[{}] marking and sending shard failed due to [{}]", shardRouting.shardId(), message), failure);
failedShardsCache.put(shardRouting.shardId(), shardRouting);
shardStateAction.localShardFailed(shardRouting, message, failure, SHARD_STATE_ACTION_LISTENER);
} catch (Exception inner) {
if (failure != null) inner.addSuppressed(failure);
logger.warn(
new ParameterizedMessage(
(Supplier<?>) () -> new ParameterizedMessage(
"[{}][{}] failed to mark shard as failed (because of [{}])",
shardRouting.getIndexName(),
shardRouting.getId(),

@ -19,6 +19,7 @@
package org.elasticsearch.indices.flush;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
@ -101,7 +102,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL

@Override
public void onFailure(Exception e) {
logger.debug(new ParameterizedMessage("{} sync flush on inactive shard failed", indexShard.shardId()), e);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} sync flush on inactive shard failed", indexShard.shardId()), e);
}
});
}
@ -336,7 +337,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL

@Override
public void handleException(TransportException exp) {
logger.trace(new ParameterizedMessage("{} error while performing synced flush on [{}], skipping", shardId, shard), exp);
logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} error while performing synced flush on [{}], skipping", shardId, shard), exp);
results.put(shard, new ShardSyncedFlushResponse(exp.getMessage()));
contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
}
@ -392,7 +393,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL

@Override
public void handleException(TransportException exp) {
logger.trace(new ParameterizedMessage("{} error while performing pre synced flush on [{}], skipping", shardId, shard), exp);
logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} error while performing pre synced flush on [{}], skipping", shardId, shard), exp);
if (countDown.countDown()) {
listener.onResponse(commitIds);
}

@ -20,6 +20,7 @@
package org.elasticsearch.indices.recovery;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.RateLimiter;
import org.elasticsearch.ElasticsearchException;
@ -142,7 +143,9 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde

protected void retryRecovery(final RecoveryTarget recoveryTarget, final Throwable reason, TimeValue retryAfter, final
StartRecoveryRequest currentRequest) {
logger.trace(new ParameterizedMessage("will retry recovery with id [{}] in [{}]", recoveryTarget.recoveryId(), retryAfter), reason);
logger.trace(
(Supplier<?>) () -> new ParameterizedMessage(
"will retry recovery with id [{}] in [{}]", recoveryTarget.recoveryId(), retryAfter), reason);
retryRecovery(recoveryTarget, retryAfter, currentRequest);
}

@ -235,7 +238,7 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde
} catch (Exception e) {
if (logger.isTraceEnabled()) {
logger.trace(
new ParameterizedMessage(
(Supplier<?>) () -> new ParameterizedMessage(
"[{}][{}] Got exception on recovery",
request.shardId().getIndex().getName(),
request.shardId().id()),
@ -352,7 +355,7 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde
// we want to wait until these mappings are processed but also need to do some maintenance and roll back the
// number of processed (completed) operations in this batch to ensure accounting is correct.
logger.trace(
new ParameterizedMessage(
(Supplier<?>) () -> new ParameterizedMessage(
"delaying recovery due to missing mapping changes (rolling back stats for [{}] ops)",
exception.completedOperations()),
exception);
@ -435,7 +438,7 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde
observer.observedState().getVersion());
} catch (Exception e) {
logger.debug(
new ParameterizedMessage(
(Supplier<?>) () -> new ParameterizedMessage(
"failed waiting for cluster state with version {} (current: {})",
clusterStateVersion,
observer.observedState()),
@ -517,14 +520,17 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde
public void onFailure(Exception e) {
try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) {
if (recoveryRef != null) {
logger.error(new ParameterizedMessage("unexpected error during recovery [{}], failing shard", recoveryId), e);
logger.error(
(Supplier<?>) () -> new ParameterizedMessage(
"unexpected error during recovery [{}], failing shard", recoveryId), e);
onGoingRecoveries.failRecovery(recoveryId,
new RecoveryFailedException(recoveryRef.status().state(), "unexpected error", e),
true // be safe
);
} else {
logger.debug(
new ParameterizedMessage("unexpected error during recovery, but recovery id [{}] is finished", recoveryId), e);
(Supplier<?>) () -> new ParameterizedMessage(
"unexpected error during recovery, but recovery id [{}] is finished", recoveryId), e);
}
}
}

@ -21,6 +21,7 @@ package org.elasticsearch.indices.recovery;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.unit.TimeValue;
@ -223,7 +224,7 @@ public class RecoveriesCollection {

@Override
public void onFailure(Exception e) {
logger.error(new ParameterizedMessage("unexpected error while monitoring recovery [{}]", recoveryId), e);
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected error while monitoring recovery [{}]", recoveryId), e);
}

@Override

@ -32,7 +32,6 @@ import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.bytes.BytesArray;
@ -316,7 +315,7 @@ public class RecoverySourceHandler {
"checksums are ok", null);
exception.addSuppressed(targetException);
logger.warn(
new ParameterizedMessage(
(org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
"{} Remote file corruption during finalization of recovery on node {}. local checksum OK",
shard.shardId(),
request.targetNode()),
@ -563,7 +562,7 @@ public class RecoverySourceHandler {
"checksums are ok", null);
exception.addSuppressed(e);
logger.warn(
new ParameterizedMessage(
(org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
"{} Remote file corruption on node {}, recovering {}. local checksum OK",
shardId,
request.targetNode(),

@ -21,6 +21,7 @@ package org.elasticsearch.indices.recovery;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
@ -294,7 +295,8 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
try {
entry.getValue().close();
} catch (Exception e) {
logger.debug(new ParameterizedMessage("error while closing recovery output [{}]", entry.getValue()), e);
logger.debug(
(Supplier<?>) () -> new ParameterizedMessage("error while closing recovery output [{}]", entry.getValue()), e);
}
iterator.remove();
}

@ -20,6 +20,7 @@
package org.elasticsearch.indices.store;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
@ -229,7 +230,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe

@Override
public void handleException(TransportException exp) {
logger.debug(new ParameterizedMessage("shards active request failed for {}", shardId), exp);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("shards active request failed for {}", shardId), exp);
if (awaitingResponses.decrementAndGet() == 0) {
allNodesResponded();
}
@ -267,14 +268,14 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
try {
indicesService.deleteShardStore("no longer used", shardId, currentState);
} catch (Exception ex) {
logger.debug(new ParameterizedMessage("{} failed to delete unallocated shard, ignoring", shardId), ex);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to delete unallocated shard, ignoring", shardId), ex);
}
return currentState;
}

@Override
public void onFailure(String source, Exception e) {
logger.error(new ParameterizedMessage("{} unexpected error during deletion of unallocated shard", shardId), e);
logger.error((Supplier<?>) () -> new ParameterizedMessage("{} unexpected error during deletion of unallocated shard", shardId), e);
}
});
}
@ -324,9 +325,9 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
try {
channel.sendResponse(new ShardActiveResponse(shardActive, clusterService.localNode()));
} catch (IOException e) {
logger.error(new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e);
logger.error((Supplier<?>) () -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e);
} catch (EsRejectedExecutionException e) {
logger.error(new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e);
logger.error((Supplier<?>) () -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e);
}
}
}, new ClusterStateObserver.ValidationPredicate() {

@ -20,6 +20,7 @@
package org.elasticsearch.monitor.fs;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.collect.Tuple;
@ -113,7 +114,9 @@ public class FsProbe extends AbstractComponent {
} catch (Exception e) {
// do not fail Elasticsearch if something unexpected
// happens here
logger.debug(new ParameterizedMessage("unexpected exception processing /proc/diskstats for devices {}", devicesNumbers), e);
logger.debug(
(Supplier<?>) () -> new ParameterizedMessage(
"unexpected exception processing /proc/diskstats for devices {}", devicesNumbers), e);
return null;
}
}

@ -21,6 +21,7 @@ package org.elasticsearch.plugins;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.analysis.util.CharFilterFactory;
import org.apache.lucene.analysis.util.TokenFilterFactory;
import org.apache.lucene.analysis.util.TokenizerFactory;
@ -246,7 +247,7 @@ public class PluginsService extends AbstractComponent {
logger.warn("plugin {}, failed to invoke custom onModule method", e, plugin.v1().getName());
throw new ElasticsearchException("failed to invoke onModule", e);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("plugin {}, failed to invoke custom onModule method", plugin.v1().getName()), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("plugin {}, failed to invoke custom onModule method", plugin.v1().getName()), e);
throw e;
}
}

@ -20,6 +20,7 @@
package org.elasticsearch.repositories;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterChangedEvent;
@ -141,7 +142,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta

@Override
public void onFailure(String source, Exception e) {
logger.warn(new ParameterizedMessage("failed to create repository [{}]", request.name), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create repository [{}]", request.name), e);
super.onFailure(source, e);
}

@ -215,7 +216,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
try {
repository.endVerification(verificationToken);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), e);
listener.onFailure(e);
return;
}
@ -232,7 +233,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
repository.endVerification(verificationToken);
} catch (Exception inner) {
inner.addSuppressed(e);
logger.warn(new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), inner);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), inner);
}
listener.onFailure(e);
}
@ -294,14 +295,14 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
} catch (RepositoryException ex) {
// TODO: this catch is bogus, it means the old repo is already closed,
// but we have nothing to replace it
logger.warn(new ParameterizedMessage("failed to change repository [{}]", repositoryMetaData.name()), ex);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to change repository [{}]", repositoryMetaData.name()), ex);
}
}
} else {
try {
repository = createRepository(repositoryMetaData);
} catch (RepositoryException ex) {
logger.warn(new ParameterizedMessage("failed to create repository [{}]", repositoryMetaData.name()), ex);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create repository [{}]", repositoryMetaData.name()), ex);
}
}
if (repository != null) {
@ -383,7 +384,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
repository.start();
return repository;
} catch (Exception e) {
logger.warn(new ParameterizedMessage("failed to create repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()), e);
throw new RepositoryException(repositoryMetaData.name(), "failed to create repository", e);
}
}

@ -22,6 +22,7 @@ package org.elasticsearch.repositories;
import com.carrotsearch.hppc.ObjectContainer;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
@ -84,7 +85,7 @@ public class VerifyNodeRepositoryAction extends AbstractComponent {
try {
doVerify(repository, verificationToken, localNode);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("[{}] failed to verify repository", repository), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to verify repository", repository), e);
errors.add(new VerificationFailure(node.getId(), e));
}
if (counter.decrementAndGet() == 0) {
@ -155,7 +156,7 @@ public class VerifyNodeRepositoryAction extends AbstractComponent {
try {
doVerify(request.repository, request.verificationToken, localNode);
} catch (Exception ex) {
logger.warn(new ParameterizedMessage("[{}] failed to verify repository", request.repository), ex);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to verify repository", request.repository), ex);
throw ex;
}
channel.sendResponse(TransportResponse.Empty.INSTANCE);

@ -20,6 +20,7 @@
|
||||
package org.elasticsearch.repositories.blobstore;
|
||||
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.apache.logging.log4j.util.Supplier;
|
||||
import org.apache.lucene.index.CorruptIndexException;
|
||||
import org.apache.lucene.index.IndexCommit;
|
||||
import org.apache.lucene.index.IndexFormatTooNewException;
|
||||
@ -354,7 +355,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
|
||||
try {
|
||||
snapshotInfo = getSnapshotInfo(snapshotId);
|
||||
} catch (SnapshotException e) {
|
||||
logger.warn(new ParameterizedMessage("[{}] repository is on a pre-5.0 format with an index file that contains snapshot [{}] but " +
|
||||
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] repository is on a pre-5.0 format with an index file that contains snapshot [{}] but " +
|
||||
"the corresponding snap-{}.dat file cannot be read. The snapshot will no longer be included in " +
|
||||
"the repository but its data directories will remain.", getMetadata().name(), snapshotId, snapshotId.getUUID()), e);
|
||||
continue;
|
||||
@ -424,7 +425,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
|
||||
try {
|
||||
indexMetaDataFormat(snapshot.version()).delete(indexMetaDataBlobContainer, snapshotId.getUUID());
|
||||
} catch (IOException ex) {
|
||||
logger.warn(new ParameterizedMessage("[{}] failed to delete metadata for index [{}]", snapshotId, index), ex);
|
||||
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to delete metadata for index [{}]", snapshotId, index), ex);
|
||||
}
|
||||
if (metaData != null) {
|
||||
IndexMetaData indexMetaData = metaData.index(index);
|
||||
@ -433,7 +434,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
|
||||
try {
|
||||
delete(snapshotId, snapshot.version(), indexId, new ShardId(indexMetaData.getIndex(), shardId));
|
||||
} catch (SnapshotException ex) {
|
||||
logger.warn(new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]", snapshotId, index, shardId), ex);
|
||||
final int finalShardId = shardId;
|
||||
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]", snapshotId, index, finalShardId), ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -452,11 +454,11 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
|
||||
// we'll ignore that and accept that cleanup didn't fully succeed.
|
||||
// since we are using UUIDs for path names, this won't be an issue for
|
||||
// snapshotting indices of the same name
|
||||
logger.debug(new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " +
|
||||
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " +
|
||||
"its index folder due to the directory not being empty.", metadata.name(), indexId), dnee);
|
||||
} catch (IOException ioe) {
|
||||
// a different IOException occurred while trying to delete - will just log the issue for now
|
||||
logger.debug(new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " +
|
||||
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " +
|
||||
"its index folder.", metadata.name(), indexId), ioe);
|
||||
}
|
||||
}
|
||||
@ -471,7 +473,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
|
||||
try {
|
||||
snapshotFormat(snapshotInfo.version()).delete(snapshotsBlobContainer, blobId);
|
||||
} catch (IOException e) {
|
||||
logger.warn(new ParameterizedMessage("[{}] Unable to delete snapshot file [{}]", snapshotInfo.snapshotId(), blobId), e);
|
||||
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] Unable to delete snapshot file [{}]", snapshotInfo.snapshotId(), blobId), e);
|
||||
}
|
||||
} else {
|
||||
// we don't know the version, first try the current format, then the legacy format
|
||||
@ -483,7 +485,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
|
||||
snapshotLegacyFormat.delete(snapshotsBlobContainer, blobId);
|
||||
} catch (IOException e2) {
|
||||
// neither snapshot file could be deleted, log the error
|
||||
logger.warn(new ParameterizedMessage("Unable to delete snapshot file [{}]", blobId), e);
|
||||
logger.warn((Supplier<?>) () -> new ParameterizedMessage("Unable to delete snapshot file [{}]", blobId), e);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -495,7 +497,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
|
||||
try {
|
||||
globalMetaDataFormat(snapshotInfo.version()).delete(snapshotsBlobContainer, blobId);
|
||||
} catch (IOException e) {
|
||||
logger.warn(new ParameterizedMessage("[{}] Unable to delete global metadata file [{}]", snapshotInfo.snapshotId(), blobId), e);
|
||||
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] Unable to delete global metadata file [{}]", snapshotInfo.snapshotId(), blobId), e);
|
||||
}
|
||||
} else {
|
||||
// we don't know the version, first try the current format, then the legacy format
|
||||
@ -507,7 +509,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
|
||||
globalMetaDataLegacyFormat.delete(snapshotsBlobContainer, blobId);
|
||||
} catch (IOException e2) {
|
||||
// neither global metadata file could be deleted, log the error
|
||||
logger.warn(new ParameterizedMessage("Unable to delete global metadata file [{}]", blobId), e);
|
||||
logger.warn((Supplier<?>) () -> new ParameterizedMessage("Unable to delete global metadata file [{}]", blobId), e);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1074,7 +1076,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
|
||||
blobContainer.deleteBlob(blobName);
|
||||
} catch (IOException e) {
|
||||
// TODO: don't catch and let the user handle it?
|
||||
logger.debug(new ParameterizedMessage("[{}] [{}] error deleting blob [{}] during cleanup", snapshotId, shardId, blobName), e);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] [{}] error deleting blob [{}] during cleanup", snapshotId, shardId, blobName), e);
}
}
}
@ -1151,7 +1153,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
indexShardSnapshotsFormat.read(blobContainer, Integer.toString(latest));
return new Tuple<>(shardSnapshots, latest);
} catch (IOException e) {
logger.warn(new ParameterizedMessage("failed to read index file [{}]", SNAPSHOT_INDEX_PREFIX + latest), e);
final String file = SNAPSHOT_INDEX_PREFIX + latest;
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to read index file [{}]", file), e);
}
}

@ -1169,7 +1172,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
snapshots.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles()));
}
} catch (IOException e) {
logger.warn(new ParameterizedMessage("failed to read commit point [{}]", name), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to read commit point [{}]", name), e);
}
}
return new Tuple<>(new BlobStoreIndexShardSnapshots(snapshots), -1);
@ -1252,7 +1255,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
// in a bwc compatible way.
maybeRecalculateMetadataHash(blobContainer, fileInfo, metadata);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e);
}
if (fileInfo.isSame(md) && snapshotFileExistsInBlobs(fileInfo, blobs)) {
// a commit point file with the same name, size and checksum was already copied to repository
@ -1525,7 +1528,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
logger.trace("[{}] [{}] restoring to an empty shard", shardId, snapshotId);
recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY;
} catch (IOException e) {
logger.warn(new ParameterizedMessage("{} Can't read metadata from store, will not reuse any local file while restoring", shardId), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} Can't read metadata from store, will not reuse any local file while restoring", shardId), e);
recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY;
}

@ -1541,7 +1544,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
maybeRecalculateMetadataHash(blobContainer, fileInfo, recoveryTargetMetadata);
} catch (Exception e) {
// if the index is broken we might not be able to read it
logger.warn(new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e);
}
snapshotMetaData.put(fileInfo.metadata().name(), fileInfo.metadata());
fileInfos.put(fileInfo.metadata().name(), fileInfo);
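The hunk at line 1153 above does more than wrap the message in a supplier: it first hoists SNAPSHOT_INDEX_PREFIX + latest into a final local. That extra statement is what makes the lambda legal, since a Java lambda may only capture effectively-final locals and latest is presumably mutated in the surrounding method. A minimal sketch of the same constraint, with invented class, variable, and message names (this is not code from the commit):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LambdaCaptureSketch {
    private static final Logger logger = LogManager.getLogger(LambdaCaptureSketch.class);

    public static void main(String[] args) {
        for (int latest = 0; latest < 3; latest++) {
            try {
                throw new java.io.IOException("simulated read failure");
            } catch (java.io.IOException e) {
                // Capturing `latest` directly in the supplier would not compile:
                // the loop mutates it, so it is not effectively final.
                // Hoist it into a final local first, as the hunk above does.
                final int index = latest;
                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to read index file [{}]", index), e);
            }
        }
    }
}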
@ -21,6 +21,7 @@ package org.elasticsearch.rest;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.bytes.BytesArray;
@ -124,9 +125,9 @@ public class BytesRestResponse extends RestResponse {
params = new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "false"), channel.request());
} else {
if (status.getStatus() < 500) {
SUPPRESSED_ERROR_LOGGER.debug(new ParameterizedMessage("path: {}, params: {}", channel.request().rawPath(), channel.request().params()), e);
SUPPRESSED_ERROR_LOGGER.debug((Supplier<?>) () -> new ParameterizedMessage("path: {}, params: {}", channel.request().rawPath(), channel.request().params()), e);
} else {
SUPPRESSED_ERROR_LOGGER.warn(new ParameterizedMessage("path: {}, params: {}", channel.request().rawPath(), channel.request().params()), e);
SUPPRESSED_ERROR_LOGGER.warn((Supplier<?>) () -> new ParameterizedMessage("path: {}, params: {}", channel.request().rawPath(), channel.request().params()), e);
}
params = channel.request();
}
@ -20,6 +20,7 @@
package org.elasticsearch.rest;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesArray;
@ -209,7 +210,7 @@ public class RestController extends AbstractLifecycleComponent {
channel.sendResponse(new BytesRestResponse(channel, e));
} catch (Exception inner) {
inner.addSuppressed(e);
logger.error(new ParameterizedMessage("failed to send failure response for uri [{}]", request.uri()), inner);
logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to send failure response for uri [{}]", request.uri()), inner);
}
}

@ -311,7 +312,7 @@ public class RestController extends AbstractLifecycleComponent {
try {
channel.sendResponse(new BytesRestResponse(channel, e));
} catch (IOException e1) {
logger.error(new ParameterizedMessage("Failed to send failure response for uri [{}]", request.uri()), e1);
logger.error((Supplier<?>) () -> new ParameterizedMessage("Failed to send failure response for uri [{}]", request.uri()), e1);
}
}
}
@ -20,6 +20,7 @@
package org.elasticsearch.script;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
@ -605,7 +606,7 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust
logger.warn("skipping compile of script file [{}] as all scripted operations are disabled for file scripts", file.toAbsolutePath());
}
} catch (Exception e) {
logger.warn(new ParameterizedMessage("failed to load/compile script [{}]", scriptNameExt.v1()), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to load/compile script [{}]", scriptNameExt.v1()), e);
}
}
}
@ -23,6 +23,7 @@ import com.carrotsearch.hppc.IntSet;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.IndicesOptions;
@ -461,7 +462,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis

@Override
public void onFailure(String source, Exception e) {
logger.warn(new ParameterizedMessage("[{}] failed to restore snapshot", snapshotId), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to restore snapshot", snapshotId), e);
listener.onFailure(e);
}

@ -478,7 +479,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis


} catch (Exception e) {
logger.warn(new ParameterizedMessage("[{}] failed to restore snapshot", request.repositoryName + ":" + request.snapshotName), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to restore snapshot", request.repositoryName + ":" + request.snapshotName), e);
listener.onFailure(e);
}
}
@ -602,7 +603,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
@Override
public void onFailure(String source, @Nullable Exception e) {
for (UpdateIndexShardRestoreStatusRequest request : drainedRequests) {
logger.warn(new ParameterizedMessage("[{}][{}] failed to update snapshot status to [{}]", request.snapshot(), request.shardId(), request.status()), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}][{}] failed to update snapshot status to [{}]", request.snapshot(), request.shardId(), request.status()), e);
}
}

@ -670,7 +671,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
try {
listener.onResponse(new RestoreCompletionResponse(snapshot, restoreInfo));
} catch (Exception e) {
logger.warn(new ParameterizedMessage("failed to update snapshot status for [{}]", listener), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to update snapshot status for [{}]", listener), e);
}
}
}
@ -21,6 +21,7 @@ package org.elasticsearch.snapshots;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.IndexCommit;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterChangedEvent;
@ -313,7 +314,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements

@Override
public void onFailure(Exception e) {
logger.warn(new ParameterizedMessage("[{}] [{}] failed to create snapshot", shardId, entry.getKey()), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] [{}] failed to create snapshot", shardId, entry.getKey()), e);
updateIndexShardSnapshotStatus(entry.getKey(), shardId, new SnapshotsInProgress.ShardSnapshotStatus(localNodeId, SnapshotsInProgress.State.FAILED, ExceptionsHelper.detailedMessage(e)));
}

@ -495,7 +496,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
UPDATE_SNAPSHOT_ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME);
}
} catch (Exception e) {
logger.warn(new ParameterizedMessage("[{}] [{}] failed to update snapshot state", request.snapshot(), request.status()), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", request.snapshot(), request.status()), e);
}
}

@ -579,7 +580,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
@Override
public void onFailure(String source, Exception e) {
for (UpdateIndexShardSnapshotStatusRequest request : drainedRequests) {
logger.warn(new ParameterizedMessage("[{}][{}] failed to update snapshot status to [{}]", request.snapshot(), request.shardId(), request.status()), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}][{}] failed to update snapshot status to [{}]", request.snapshot(), request.shardId(), request.status()), e);
}
}
});
@ -22,6 +22,7 @@ package org.elasticsearch.snapshots;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
@ -181,7 +182,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
snapshotSet.add(repository.getSnapshotInfo(snapshotId));
} catch (Exception ex) {
if (ignoreUnavailable) {
logger.warn(new ParameterizedMessage("failed to get snapshot [{}]", snapshotId), ex);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to get snapshot [{}]", snapshotId), ex);
} else {
throw new SnapshotException(repositoryName, snapshotId, "Snapshot could not be read", ex);
}
@ -255,7 +256,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus

@Override
public void onFailure(String source, Exception e) {
logger.warn(new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e);
newSnapshot = null;
listener.onFailure(e);
}
@ -406,7 +407,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus

@Override
public void onFailure(String source, Exception e) {
logger.warn(new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()), e);
removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, true, userCreateSnapshotListener, e));
}

@ -428,7 +429,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
}
});
} catch (Exception e) {
logger.warn(new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e);
removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, snapshotCreated, userCreateSnapshotListener, e));
}
}
@ -470,7 +471,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
Collections.emptyList());
} catch (Exception inner) {
inner.addSuppressed(exception);
logger.warn(new ParameterizedMessage("[{}] failed to close snapshot in repository", snapshot.snapshot()), inner);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to close snapshot in repository", snapshot.snapshot()), inner);
}
}
userCreateSnapshotListener.onFailure(e);
@ -723,7 +724,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus

@Override
public void onFailure(String source, Exception e) {
logger.warn(new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e);
}
});
}
@ -877,7 +878,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
SnapshotInfo snapshotInfo = repository.finalizeSnapshot(snapshot.getSnapshotId(), entry.indices(), entry.startTime(), failure, entry.shards().size(), Collections.unmodifiableList(shardFailures));
removeSnapshotFromClusterState(snapshot, snapshotInfo, null);
} catch (Exception e) {
logger.warn(new ParameterizedMessage("[{}] failed to finalize snapshot", snapshot), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to finalize snapshot", snapshot), e);
removeSnapshotFromClusterState(snapshot, null, e);
}
}
@ -926,7 +927,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus

@Override
public void onFailure(String source, Exception e) {
logger.warn(new ParameterizedMessage("[{}] failed to remove snapshot metadata", snapshot), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to remove snapshot metadata", snapshot), e);
if (listener != null) {
listener.onFailure(e);
}
@ -942,7 +943,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
listener.onSnapshotFailure(snapshot, failure);
}
} catch (Exception t) {
logger.warn(new ParameterizedMessage("failed to notify listener [{}]", listener), t);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to notify listener [{}]", listener), t);
}
}
if (listener != null) {
@ -21,6 +21,7 @@ package org.elasticsearch.tasks;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.common.logging.Loggers;

/**
@ -50,6 +51,6 @@ public final class LoggingTaskListener<Response> implements TaskListener<Respons

@Override
public void onFailure(Task task, Throwable e) {
logger.warn(new ParameterizedMessage("{} failed with exception", task.getId()), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed with exception", task.getId()), e);
}
}
@ -20,6 +20,7 @@
package org.elasticsearch.tasks;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.ExceptionsHelper;
@ -47,7 +48,6 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;

import static org.elasticsearch.ExceptionsHelper.detailedMessage;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;

/**
@ -168,7 +168,8 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen
try {
taskResult = task.result(localNode, error);
} catch (IOException ex) {
logger.warn(new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), ex);
logger.warn(
(Supplier<?>) () -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), ex);
listener.onFailure(ex);
return;
}
@ -180,7 +181,8 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen

@Override
public void onFailure(Exception e) {
logger.warn(new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), e);
logger.warn(
(Supplier<?>) () -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), e);
listener.onFailure(e);
}
});
@ -201,7 +203,7 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen
try {
taskResult = task.result(localNode, response);
} catch (IOException ex) {
logger.warn(new ParameterizedMessage("couldn't store response {}", response), ex);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("couldn't store response {}", response), ex);
listener.onFailure(ex);
return;
}
@ -214,7 +216,7 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen

@Override
public void onFailure(Exception e) {
logger.warn(new ParameterizedMessage("couldn't store response {}", response), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("couldn't store response {}", response), e);
listener.onFailure(e);
}
});
@ -19,6 +19,7 @@
package org.elasticsearch.tasks;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
@ -164,7 +165,9 @@ public class TaskResultsService extends AbstractComponent {
Streams.copy(is, out);
return out.toString(IOUtils.UTF_8);
} catch (Exception e) {
logger.error(new ParameterizedMessage("failed to create tasks results index template [{}]", TASK_RESULT_INDEX_MAPPING_FILE), e);
logger.error(
(Supplier<?>) () -> new ParameterizedMessage(
"failed to create tasks results index template [{}]", TASK_RESULT_INDEX_MAPPING_FILE), e);
throw new IllegalStateException("failed to create tasks results index template [" + TASK_RESULT_INDEX_MAPPING_FILE + "]", e);
}
@ -20,6 +20,7 @@
package org.elasticsearch.threadpool;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.Nullable;
@ -414,7 +415,7 @@ public class ThreadPool extends AbstractComponent implements Closeable {
try {
runnable.run();
} catch (Exception e) {
logger.warn(new ParameterizedMessage("failed to run {}", runnable.toString()), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to run {}", runnable.toString()), e);
throw e;
}
}
@ -780,14 +781,14 @@

@Override
public void onFailure(Exception e) {
threadPool.logger.warn(new ParameterizedMessage("failed to run scheduled task [{}] on thread pool [{}]", runnable.toString(), executor), e);
threadPool.logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to run scheduled task [{}] on thread pool [{}]", runnable.toString(), executor), e);
}

@Override
public void onRejection(Exception e) {
run = false;
if (threadPool.logger.isDebugEnabled()) {
threadPool.logger.debug(new ParameterizedMessage("scheduled task [{}] was rejected on thread pool [{}]", runnable, executor), e);
threadPool.logger.debug((Supplier<?>) () -> new ParameterizedMessage("scheduled task [{}] was rejected on thread pool [{}]", runnable, executor), e);
}
}
@ -21,6 +21,7 @@ package org.elasticsearch.transport;
import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.hppc.IntSet;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
@ -259,10 +260,13 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
sendMessage(channel, pingHeader, successfulPings::inc, false);
} catch (Exception e) {
if (isOpen(channel)) {
logger.debug(new ParameterizedMessage("[{}] failed to send ping transport message", node), e);
logger.debug(
(Supplier<?>) () -> new ParameterizedMessage("[{}] failed to send ping transport message", node), e);
failedPings.inc();
} else {
logger.trace(new ParameterizedMessage("[{}] failed to send ping transport message (channel closed)", node), e);
logger.trace(
(Supplier<?>) () -> new ParameterizedMessage(
"[{}] failed to send ping transport message (channel closed)", node), e);
}
}
}
@ -398,7 +402,9 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
try {
nodeChannels = connectToChannels(node);
} catch (Exception e) {
logger.trace(new ParameterizedMessage("failed to connect to [{}], cleaning dangling connections", node), e);
logger.trace(
(Supplier<?>) () -> new ParameterizedMessage(
"failed to connect to [{}], cleaning dangling connections", node), e);
throw e;
}
}
@ -773,7 +779,9 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
try {
closeChannels(entry.getValue());
} catch (Exception e) {
logger.debug(new ParameterizedMessage("Error closing serverChannel for profile [{}]", entry.getKey()), e);
logger.debug(
(Supplier<?>) () -> new ParameterizedMessage(
"Error closing serverChannel for profile [{}]", entry.getKey()), e);
}
}

@ -804,23 +812,23 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
}
if (isCloseConnectionException(e)) {
logger.trace(
new ParameterizedMessage(
(Supplier<?>) () -> new ParameterizedMessage(
"close connection exception caught on transport layer [{}], disconnecting from relevant node",
channel),
e);
// close the channel, which will cause a node to be disconnected if relevant
disconnectFromNodeChannel(channel, e);
} else if (isConnectException(e)) {
logger.trace(new ParameterizedMessage("connect exception caught on transport layer [{}]", channel), e);
logger.trace((Supplier<?>) () -> new ParameterizedMessage("connect exception caught on transport layer [{}]", channel), e);
// close the channel as safe measure, which will cause a node to be disconnected if relevant
disconnectFromNodeChannel(channel, e);
} else if (e instanceof BindException) {
logger.trace(new ParameterizedMessage("bind exception caught on transport layer [{}]", channel), e);
logger.trace((Supplier<?>) () -> new ParameterizedMessage("bind exception caught on transport layer [{}]", channel), e);
// close the channel as safe measure, which will cause a node to be disconnected if relevant
disconnectFromNodeChannel(channel, e);
} else if (e instanceof CancelledKeyException) {
logger.trace(
new ParameterizedMessage(
(Supplier<?>) () -> new ParameterizedMessage(
"cancelled key exception caught on transport layer [{}], disconnecting from relevant node",
channel),
e);
@ -832,7 +840,8 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
sendMessage(channel, new BytesArray(e.getMessage().getBytes(StandardCharsets.UTF_8)), () -> {}, true);
}
} else {
logger.warn(new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e);
logger.warn(
(Supplier<?>) () -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e);
// close the channel, which will cause a node to be disconnected if relevant
disconnectFromNodeChannel(channel, e);
}
@ -1267,7 +1276,7 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
try {
handler.handleException(rtx);
} catch (Exception e) {
logger.error(new ParameterizedMessage("failed to handle exception response [{}]", handler), e);
logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e);
}
});
}
@ -1304,7 +1313,9 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
transportChannel.sendResponse(e);
} catch (IOException inner) {
inner.addSuppressed(e);
logger.warn(new ParameterizedMessage("Failed to send error message back to client for action [{}]", action), inner);
logger.warn(
(Supplier<?>) () -> new ParameterizedMessage(
"Failed to send error message back to client for action [{}]", action), inner);
}
}
return action;
@ -1351,7 +1362,8 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
} catch (Exception inner) {
inner.addSuppressed(e);
logger.warn(
new ParameterizedMessage("Failed to send error message back to client for action [{}]", reg.getAction()), inner);
(Supplier<?>) () -> new ParameterizedMessage(
"Failed to send error message back to client for action [{}]", reg.getAction()), inner);
}
}
}
@ -64,7 +64,11 @@ public class TransportChannelResponseHandler<T extends TransportResponse> implem
channel.sendResponse(exp);
} catch (IOException e) {
logger.debug(
new ParameterizedMessage("failed to send failure {}", extraInfoOnError == null ? "" : "(" + extraInfoOnError + ")"), e);
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage(
"failed to send failure {}",
extraInfoOnError == null ? "" : "(" + extraInfoOnError + ")"),
e);
}
}
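The fully qualified org.apache.logging.log4j.util.Supplier<?> cast in the hunk above, like the shorter (Supplier<?>) casts used throughout this diff, appears to be needed to pick an overload: Log4j 2's Logger declares logging methods that accept both a MessageSupplier and a Supplier<?>, and a lambda returning a ParameterizedMessage is compatible with either functional interface, so the call needs an explicit target type. Spelling out the package here presumably also avoids a clash with java.util.function.Supplier where that type is already imported. A small sketch with invented names (not code from the commit):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;

public class SupplierCastSketch {
    private static final Logger logger = LogManager.getLogger(SupplierCastSketch.class);

    public static void main(String[] args) {
        Exception e = new Exception("simulated failure");
        // Without a cast the call is ambiguous: the lambda matches both the
        // MessageSupplier and the Supplier<?> overloads of Logger#debug.
        // logger.debug(() -> new ParameterizedMessage("failed to send failure {}", "(extra)"), e);
        logger.debug(
            (org.apache.logging.log4j.util.Supplier<?>)
                () -> new ParameterizedMessage("failed to send failure {}", "(extra)"), e);
    }
}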
@ -207,7 +207,7 @@ public class TransportService extends AbstractLifecycleComponent {
public void onRejection(Exception e) {
// if we get rejected during node shutdown we don't wanna bubble it up
logger.debug(
new ParameterizedMessage(
(Supplier<?>) () -> new ParameterizedMessage(
"failed to notify response handler on rejection, action: {}",
holderToNotify.action()),
e);
@ -215,7 +215,7 @@ public class TransportService extends AbstractLifecycleComponent {
@Override
public void onFailure(Exception e) {
logger.warn(
new ParameterizedMessage(
(Supplier<?>) () -> new ParameterizedMessage(
"failed to notify response handler on exception, action: {}",
holderToNotify.action()),
e);
@ -493,7 +493,7 @@ public class TransportService extends AbstractLifecycleComponent {
public void onRejection(Exception e) {
// if we get rejected during node shutdown we don't wanna bubble it up
logger.debug(
new ParameterizedMessage(
(Supplier<?>) () -> new ParameterizedMessage(
"failed to notify response handler on rejection, action: {}",
holderToNotify.action()),
e);
@ -501,7 +501,7 @@ public class TransportService extends AbstractLifecycleComponent {
@Override
public void onFailure(Exception e) {
logger.warn(
new ParameterizedMessage(
(Supplier<?>) () -> new ParameterizedMessage(
"failed to notify response handler on exception, action: {}",
holderToNotify.action()),
e);
@ -546,7 +546,8 @@ public class TransportService extends AbstractLifecycleComponent {
} catch (Exception inner) {
inner.addSuppressed(e);
logger.warn(
new ParameterizedMessage("failed to notify channel of error message for action [{}]", action), inner);
(Supplier<?>) () -> new ParameterizedMessage(
"failed to notify channel of error message for action [{}]", action), inner);
}
}
});
@ -557,7 +558,9 @@ public class TransportService extends AbstractLifecycleComponent {
channel.sendResponse(e);
} catch (Exception inner) {
inner.addSuppressed(e);
logger.warn(new ParameterizedMessage("failed to notify channel of error message for action [{}]", action), inner);
logger.warn(
(Supplier<?>) () -> new ParameterizedMessage(
"failed to notify channel of error message for action [{}]", action), inner);
}
}

@ -679,7 +682,9 @@ public class TransportService extends AbstractLifecycleComponent {
}

protected void traceResponseSent(long requestId, String action, Exception e) {
tracerLog.trace(new ParameterizedMessage("[{}][{}] sent error response", requestId, action), e);
tracerLog.trace(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("[{}][{}] sent error response", requestId, action), e);
}

@Override
@ -1052,7 +1057,9 @@ public class TransportService extends AbstractLifecycleComponent {
try {
handler.handleException(rtx);
} catch (Exception e) {
logger.error(new ParameterizedMessage("failed to handle exception for action [{}], handler [{}]", action, handler), e);
logger.error(
(Supplier<?>) () -> new ParameterizedMessage(
"failed to handle exception for action [{}], handler [{}]", action, handler), e);
}
}
@ -20,6 +20,7 @@
package org.elasticsearch.transport.local;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
@ -304,7 +305,7 @@ public class LocalTransport extends AbstractLifecycleComponent implements Transp
});
}
} else {
logger.warn(new ParameterizedMessage("Failed to receive message for action [{}]", action), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to receive message for action [{}]", action), e);
}
}
}
@ -354,7 +355,8 @@ public class LocalTransport extends AbstractLifecycleComponent implements Transp
} catch (Exception inner) {
inner.addSuppressed(e);
logger.warn(
new ParameterizedMessage("Failed to send error message back to client for action [{}]", action), inner);
(Supplier<?>) () -> new ParameterizedMessage(
"Failed to send error message back to client for action [{}]", action), inner);
}
}
}
@ -365,7 +367,9 @@ public class LocalTransport extends AbstractLifecycleComponent implements Transp
transportChannel.sendResponse(e);
} catch (Exception inner) {
inner.addSuppressed(e);
logger.warn(new ParameterizedMessage("Failed to send error message back to client for action [{}]", action), inner);
logger.warn(
(Supplier<?>) () -> new ParameterizedMessage(
"Failed to send error message back to client for action [{}]", action), inner);
}

}
@ -413,7 +417,7 @@ public class LocalTransport extends AbstractLifecycleComponent implements Transp
try {
handler.handleException(rtx);
} catch (Exception e) {
logger.error(new ParameterizedMessage("failed to handle exception response [{}]", handler), e);
logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e);
}
}
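Every hunk above applies the same mechanical transformation: an eagerly constructed ParameterizedMessage becomes a supplier that Log4j 2 invokes only when the level is enabled, so suppressed debug and trace calls no longer build the message object or its argument array. The sketch below demonstrates the behavior end to end, assuming log4j-api and log4j-core (for Configurator) are on the classpath; class and message names are invented, not taken from the commit:

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyLoggingSketch {
    private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

    public static void main(String[] args) {
        Configurator.setRootLevel(Level.INFO); // debug is disabled

        // Eager form: the message and its argument array are allocated
        // even though the log call is a no-op at INFO.
        logger.debug(new ParameterizedMessage("eager [{}]", "allocated anyway"));

        // Lazy form: the supplier is never invoked at INFO, so nothing
        // beyond the non-capturing lambda reference is allocated.
        logger.debug((Supplier<?>) () -> {
            System.out.println("never printed: supplier not invoked");
            return new ParameterizedMessage("lazy [{}]", "skipped");
        });
    }
}

The supplier form has the same effect as the explicit isDebugEnabled() guard retained in the ThreadPool hunk, just expressed inline at each call site.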