Remove type casts in logging in server component (#28807)
This commit removes the type casts in logging calls in the server component (other components will be done later). It also adds a parameterized-message test that would catch breaking changes related to lambdas in Log4j.
parent 4a8099c696
commit 794de63232
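For context, a minimal, self-contained sketch of the old and new call shapes against the Log4j 2 API — this is not part of the commit, and the class and variable names here are illustrative only:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;

    public class LambdaLoggingSketch {
        private static final Logger logger = LogManager.getLogger(LambdaLoggingSketch.class);

        public static void main(String[] args) {
            String source = "put-mapping [my-index]"; // illustrative value
            Exception e = new RuntimeException("simulated failure");

            // Old style (what the diff removes): the zero-argument lambda was
            // cast to Log4j's Supplier to pin it to the
            // error(Supplier<?>, Throwable) overload.
            logger.error((org.apache.logging.log4j.util.Supplier<?>)
                    () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);

            // New style (what the diff switches to): pass the lambda directly;
            // the message is still built lazily, only if ERROR is enabled.
            logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
        }
    }

The point of the parameterized-message test the commit message mentions is presumably that a single call like the uncast one above exercises Log4j's lambda overloads; if a future Log4j upgrade removed or changed them, that call would break, which is exactly the kind of regression the test is meant to catch.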
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.cluster.health;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.ActiveShardCount;
@@ -104,7 +103,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
 
 @Override
 public void onFailure(String source, Exception e) {
-logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
+logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
 listener.onFailure(e);
 }
 });
@@ -132,7 +131,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
 
 @Override
 public void onFailure(String source, Exception e) {
-logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
+logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
 listener.onFailure(e);
 }
 });
@@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.cluster.reroute;
 
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -112,7 +111,7 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu
 
 @Override
 public void onFailure(String source, Exception e) {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
+logger.debug(() -> new ParameterizedMessage("failed to perform [{}]", source), e);
 super.onFailure(source, e);
 }
 
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.cluster.settings;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
@@ -160,7 +159,7 @@ public class TransportClusterUpdateSettingsAction extends
 @Override
 public void onFailure(String source, Exception e) {
 //if the reroute fails we only log
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
+logger.debug(() -> new ParameterizedMessage("failed to perform [{}]", source), e);
 listener.onFailure(new ElasticsearchException("reroute after update settings failed", e));
 }
 
@@ -174,7 +173,7 @@ public class TransportClusterUpdateSettingsAction extends
 
 @Override
 public void onFailure(String source, Exception e) {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
+logger.debug(() -> new ParameterizedMessage("failed to perform [{}]", source), e);
 super.onFailure(source, e);
 }
 
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.indices.close;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DestructiveOperations;
@@ -114,7 +113,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
 
 @Override
 public void onFailure(Exception t) {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices), t);
+logger.debug(() -> new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices), t);
 listener.onFailure(t);
 }
 });
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.indices.delete;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DestructiveOperations;
@@ -102,7 +101,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete
 
 @Override
 public void onFailure(Exception t) {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete indices [{}]", concreteIndices), t);
+logger.debug(() -> new ParameterizedMessage("failed to delete indices [{}]", concreteIndices), t);
 listener.onFailure(t);
 }
 });
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.indices.mapping.put;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -93,12 +92,12 @@ public class TransportPutMappingAction extends TransportMasterNodeAction<PutMapp
 
 @Override
 public void onFailure(Exception t) {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", concreteIndices, request.type()), t);
+logger.debug(() -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", concreteIndices, request.type()), t);
 listener.onFailure(t);
 }
 });
 } catch (IndexNotFoundException ex) {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", request.indices(), request.type()), ex);
+logger.debug(() -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", request.indices(), request.type()), ex);
 throw ex;
 }
 }
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.indices.open;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DestructiveOperations;
@@ -99,7 +98,7 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction<OpenInde
 
 @Override
 public void onFailure(Exception t) {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to open indices [{}]", (Object) concreteIndices), t);
+logger.debug(() -> new ParameterizedMessage("failed to open indices [{}]", (Object) concreteIndices), t);
 listener.onFailure(t);
 }
 });
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.indices.settings.put;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -94,7 +93,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd
 
 @Override
 public void onFailure(Exception t) {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to update settings on indices [{}]", (Object) concreteIndices), t);
+logger.debug(() -> new ParameterizedMessage("failed to update settings on indices [{}]", (Object) concreteIndices), t);
 listener.onFailure(t);
 }
 });
@@ -19,7 +19,6 @@
 package org.elasticsearch.action.admin.indices.template.delete;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -75,7 +74,7 @@ public class TransportDeleteIndexTemplateAction extends TransportMasterNodeActio
 
 @Override
 public void onFailure(Exception e) {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete templates [{}]", request.name()), e);
+logger.debug(() -> new ParameterizedMessage("failed to delete templates [{}]", request.name()), e);
 listener.onFailure(e);
 }
 });
@@ -19,7 +19,6 @@
 package org.elasticsearch.action.admin.indices.template.put;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -97,7 +96,7 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction<P
 
 @Override
 public void onFailure(Exception e) {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put template [{}]", request.name()), e);
+logger.debug(() -> new ParameterizedMessage("failed to put template [{}]", request.name()), e);
 listener.onFailure(e);
 }
 });
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.indices.upgrade.post;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -78,7 +77,7 @@ public class TransportUpgradeSettingsAction extends TransportMasterNodeAction<Up
 
 @Override
 public void onFailure(Exception t) {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to upgrade minimum compatibility version settings on indices [{}]", request.versions().keySet()), t);
+logger.debug(() -> new ParameterizedMessage("failed to upgrade minimum compatibility version settings on indices [{}]", request.versions().keySet()), t);
 listener.onFailure(t);
 }
 });
@@ -20,7 +20,6 @@ package org.elasticsearch.action.bulk;
 
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Settings;
@@ -89,10 +88,10 @@ public final class BulkRequestHandler {
 }
 } catch (InterruptedException e) {
 Thread.currentThread().interrupt();
-logger.info((Supplier<?>) () -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e);
+logger.info(() -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e);
 listener.afterBulk(executionId, bulkRequest, e);
 } catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e);
+logger.warn(() -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e);
 listener.afterBulk(executionId, bulkRequest, e);
 } finally {
 if (bulkRequestSetupSuccessful == false) {  // if we fail on client.bulk() release the semaphore
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.bulk;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.SparseFixedBitSet;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.ExceptionsHelper;
@@ -494,7 +493,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
 long ingestStartTimeInNanos = System.nanoTime();
 BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original);
 ingestService.getPipelineExecutionService().executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]",
+logger.debug(() -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]",
 indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()), exception);
 bulkRequestModifier.markCurrentItemAsFailed(exception);
 }, (exception) -> {
@@ -21,7 +21,6 @@ package org.elasticsearch.action.bulk;
 
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.DocWriteResponse;
@@ -197,10 +196,10 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
 DocWriteRequest docWriteRequest = replicaRequest.request();
 Exception failure = operationResult.getFailure();
 if (isConflictException(failure)) {
-logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
+logger.trace(() -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
 request.shardId(), docWriteRequest.opType().getLowercase(), request), failure);
 } else {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
+logger.debug(() -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
 request.shardId(), docWriteRequest.opType().getLowercase(), request), failure);
 }
 
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.get;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.TransportActions;
@@ -95,7 +94,7 @@ public class TransportShardMultiGetAction extends TransportSingleShardAction<Mul
 if (TransportActions.isShardNotAvailableException(e)) {
 throw (ElasticsearchException) e;
 } else {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId,
+logger.debug(() -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId,
 item.type(), item.id()), e);
 response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), item.type(), item.id(), e));
 }
@@ -21,7 +21,6 @@ package org.elasticsearch.action.search;
 
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
@@ -125,10 +124,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
 final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures());
 Throwable cause = shardSearchFailures.length == 0 ? null :
 ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0];
-if (logger.isDebugEnabled()) {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("All shards failed for phase: [{}]", getName()),
-cause);
-}
+logger.debug(() -> new ParameterizedMessage("All shards failed for phase: [{}]", getName()), cause);
 onPhaseFailure(currentPhase, "all shards failed", cause);
 } else {
 Boolean allowPartialResults = request.allowPartialSearchResults();
@@ -138,9 +134,8 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
 final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures());
 Throwable cause = shardSearchFailures.length == 0 ? null :
 ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0];
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} shards failed for phase: [{}]",
-shardSearchFailures.length, getName()),
-cause);
+logger.debug(() -> new ParameterizedMessage("{} shards failed for phase: [{}]",
+shardSearchFailures.length, getName()), cause);
 }
 onPhaseFailure(currentPhase, "Partial shards failure", null);
 } else {
@@ -160,10 +155,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
 phase.run();
 } catch (Exception e) {
 if (logger.isDebugEnabled()) {
-logger.debug(
-(Supplier<?>) () -> new ParameterizedMessage(
-"Failed to execute [{}] while moving to [{}] phase", request, phase.getName()),
-e);
+logger.debug(new ParameterizedMessage("Failed to execute [{}] while moving to [{}] phase", request, phase.getName()), e);
 }
 onPhaseFailure(phase, "", e);
 }
@@ -20,7 +20,6 @@ package org.elasticsearch.action.search;
 
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -133,7 +132,7 @@ final class ClearScrollController implements Runnable {
 }
 
 private void onFailedFreedContext(Throwable e, DiscoveryNode node) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("Clear SC failed on node[{}]", node), e);
+logger.warn(() -> new ParameterizedMessage("Clear SC failed on node[{}]", node), e);
 if (expectedOps.countDown()) {
 listener.onResponse(new ClearScrollResponse(false, freedSearchContexts.get()));
 } else {
@@ -19,7 +19,6 @@
 package org.elasticsearch.action.search;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.search.SearchPhaseResult;
 import org.elasticsearch.search.SearchShardTarget;
@@ -87,10 +86,8 @@ final class DfsQueryPhase extends SearchPhase {
 @Override
 public void onFailure(Exception exception) {
 try {
-if (context.getLogger().isDebugEnabled()) {
-context.getLogger().debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase",
-querySearchRequest.id()), exception);
-}
+context.getLogger().debug(() -> new ParameterizedMessage("[{}] Failed to execute query phase",
+querySearchRequest.id()), exception);
 counter.onFailure(shardIndex, searchShardTarget, exception);
 } finally {
 // the query might not have been executed at all (for example because thread pool rejected
@@ -21,7 +21,6 @@ package org.elasticsearch.action.search;
 import com.carrotsearch.hppc.IntArrayList;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.search.ScoreDoc;
 import org.elasticsearch.action.ActionRunnable;
 import org.elasticsearch.action.OriginalIndices;
@@ -169,10 +168,7 @@ final class FetchSearchPhase extends SearchPhase {
 @Override
 public void onFailure(Exception e) {
 try {
-if (logger.isDebugEnabled()) {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase",
-fetchSearchRequest.id()), e);
-}
+logger.debug(() -> new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e);
 counter.onFailure(shardIndex, shardTarget, e);
 } finally {
 // the search context might not be cleared on the node where the fetch was executed for example
@@ -20,7 +20,6 @@ package org.elasticsearch.action.search;
 
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.NoShardAvailableActionException;
 import org.elasticsearch.action.support.TransportActions;
 import org.elasticsearch.cluster.routing.GroupShardsIterator;
@@ -93,15 +92,10 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
 if (totalOps.incrementAndGet() == expectedTotalOps) {
 if (logger.isDebugEnabled()) {
 if (e != null && !TransportActions.isShardNotAvailableException(e)) {
-logger.debug(
-(Supplier<?>) () -> new ParameterizedMessage(
-"{}: Failed to execute [{}]",
-shard != null ? shard.shortSummary() :
-shardIt.shardId(),
-request),
-e);
+logger.debug(new ParameterizedMessage(
+"{}: Failed to execute [{}]", shard != null ? shard.shortSummary() : shardIt.shardId(), request), e);
 } else if (logger.isTraceEnabled()) {
-logger.trace((Supplier<?>) () -> new ParameterizedMessage("{}: Failed to execute [{}]", shard, request), e);
+logger.trace(new ParameterizedMessage("{}: Failed to execute [{}]", shard, request), e);
 }
 }
 onPhaseDone();
@@ -109,13 +103,9 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
 final ShardRouting nextShard = shardIt.nextOrNull();
 final boolean lastShard = nextShard == null;
 // trace log this exception
-logger.trace(
-(Supplier<?>) () -> new ParameterizedMessage(
-"{}: Failed to execute [{}] lastShard [{}]",
-shard != null ? shard.shortSummary() : shardIt.shardId(),
-request,
-lastShard),
-e);
+logger.trace(() -> new ParameterizedMessage(
+"{}: Failed to execute [{}] lastShard [{}]",
+shard != null ? shard.shortSummary() : shardIt.shardId(), request, lastShard), e);
 if (!lastShard) {
 performPhaseOnShard(shardIndex, shardIt, nextShard);
 } else {
@@ -123,14 +113,9 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
 // no more shards active, add a failure
 if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
 if (e != null && !TransportActions.isShardNotAvailableException(e)) {
-logger.debug(
-(Supplier<?>) () -> new ParameterizedMessage(
-"{}: Failed to execute [{}] lastShard [{}]",
-shard != null ? shard.shortSummary() :
-shardIt.shardId(),
-request,
-lastShard),
-e);
+logger.debug(new ParameterizedMessage(
+"{}: Failed to execute [{}] lastShard [{}]",
+shard != null ? shard.shortSummary() : shardIt.shardId(), request, lastShard), e);
 }
 }
 }
@@ -91,13 +91,8 @@ public abstract class HandledTransportAction<Request extends ActionRequest, Resp
 try {
 channel.sendResponse(e);
 } catch (Exception e1) {
-logger.warn(
-(org.apache.logging.log4j.util.Supplier<?>)
-() -> new ParameterizedMessage(
-"Failed to send error response for action [{}] and request [{}]",
-actionName,
-request),
-e1);
+logger.warn(() -> new ParameterizedMessage(
+"Failed to send error response for action [{}] and request [{}]", actionName, request), e1);
 }
 }
 });
@@ -21,7 +21,6 @@ package org.elasticsearch.action.support;
 
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.client.transport.TransportClient;
@@ -120,8 +119,7 @@ public final class ThreadedActionListener<Response> implements ActionListener<Re
 
 @Override
 public void onFailure(Exception e) {
-logger.warn(
-(Supplier<?>) () -> new ParameterizedMessage("failed to execute failure callback on [{}]", listener), e);
+logger.warn(() -> new ParameterizedMessage("failed to execute failure callback on [{}]", listener), e);
 }
 });
 }
@@ -222,13 +222,8 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
 if (e != null) {
 if (logger.isTraceEnabled()) {
 if (!TransportActions.isShardNotAvailableException(e)) {
-logger.trace(
-(org.apache.logging.log4j.util.Supplier<?>)
-() -> new ParameterizedMessage(
-"{}: failed to execute [{}]",
-shard != null ? shard.shortSummary() : shardIt.shardId(),
-request),
-e);
+logger.trace(new ParameterizedMessage(
+"{}: failed to execute [{}]", shard != null ? shard.shortSummary() : shardIt.shardId(), request), e);
 }
 }
 }
@@ -237,13 +232,8 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
 if (logger.isDebugEnabled()) {
 if (e != null) {
 if (!TransportActions.isShardNotAvailableException(e)) {
-logger.debug(
-(org.apache.logging.log4j.util.Supplier<?>)
-() -> new ParameterizedMessage(
-"{}: failed to execute [{}]",
-shard != null ? shard.shortSummary() : shardIt.shardId(),
-request),
-e);
+logger.debug(new ParameterizedMessage(
+"{}: failed to execute [{}]", shard != null ? shard.shortSummary() : shardIt.shardId(), request), e);
 }
 }
 }
@@ -362,9 +362,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
 protected void onNodeFailure(DiscoveryNode node, int nodeIndex, Throwable t) {
 String nodeId = node.getId();
 if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
-logger.debug(
-(org.apache.logging.log4j.util.Supplier<?>)
-() -> new ParameterizedMessage("failed to execute [{}] on node [{}]", actionName, nodeId), t);
+logger.debug(new ParameterizedMessage("failed to execute [{}] on node [{}]", actionName, nodeId), t);
 }
 
 // this is defensive to protect against the possibility of double invocation
@@ -441,23 +439,13 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
 shardResults[shardIndex] = failure;
 if (TransportActions.isShardNotAvailableException(e)) {
 if (logger.isTraceEnabled()) {
-logger.trace(
-(org.apache.logging.log4j.util.Supplier<?>)
-() -> new ParameterizedMessage(
-"[{}] failed to execute operation for shard [{}]",
-actionName,
-shardRouting.shortSummary()),
-e);
+logger.trace(new ParameterizedMessage(
+"[{}] failed to execute operation for shard [{}]", actionName, shardRouting.shortSummary()), e);
 }
 } else {
 if (logger.isDebugEnabled()) {
-logger.debug(
-(org.apache.logging.log4j.util.Supplier<?>)
-() -> new ParameterizedMessage(
-"[{}] failed to execute operation for shard [{}]",
-actionName,
-shardRouting.shortSummary()),
-e);
+logger.debug(new ParameterizedMessage(
+"[{}] failed to execute operation for shard [{}]", actionName, shardRouting.shortSummary()), e);
 }
 }
 }
@@ -171,7 +171,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
 public void onFailure(Exception t) {
 if (t instanceof Discovery.FailedToCommitClusterStateException
 || (t instanceof NotMasterException)) {
-logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t);
+logger.debug(() -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t);
 retry(t, masterChangePredicate);
 } else {
 listener.onFailure(t);
@@ -226,7 +226,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
 
 @Override
 public void onTimeout(TimeValue timeout) {
-logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), failure);
+logger.debug(() -> new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), failure);
 listener.onFailure(new MasterNotDiscoveredException(failure));
 }
 }, statePredicate
@@ -232,9 +232,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
 
 private void onFailure(int idx, String nodeId, Throwable t) {
 if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
-logger.debug(
-(org.apache.logging.log4j.util.Supplier<?>)
-() -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
+logger.debug(new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
 }
 responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));
 if (counter.incrementAndGet() == responses.length()) {
@@ -178,7 +178,7 @@ public class ReplicationOperation<
 
 @Override
 public void onFailure(Exception replicaException) {
-logger.trace((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
+logger.trace(() -> new ParameterizedMessage(
 "[{}] failure while performing [{}] on replica {}, request [{}]",
 shard.shardId(), opType, shard, replicaRequest), replicaException);
 // Only report "critical" exceptions - TODO: Reach out to the master node to get the latest shard state then report.
@@ -265,9 +265,7 @@ public abstract class TransportReplicationAction<
 channel.sendResponse(e);
 } catch (Exception inner) {
 inner.addSuppressed(e);
-logger.warn(
-(org.apache.logging.log4j.util.Supplier<?>)
-() -> new ParameterizedMessage("Failed to send response for {}", actionName), inner);
+logger.warn(() -> new ParameterizedMessage("Failed to send response for {}", actionName), inner);
 }
 }
 });
@@ -579,7 +577,6 @@ public abstract class TransportReplicationAction<
 public void onFailure(Exception e) {
 if (e instanceof RetryOnReplicaException) {
 logger.trace(
-(org.apache.logging.log4j.util.Supplier<?>)
 () -> new ParameterizedMessage(
 "Retrying operation on replica, action [{}], request [{}]",
 transportReplicaAction,
@@ -621,12 +618,8 @@ public abstract class TransportReplicationAction<
 channel.sendResponse(e);
 } catch (IOException responseException) {
 responseException.addSuppressed(e);
-logger.warn(
-(org.apache.logging.log4j.util.Supplier<?>)
-() -> new ParameterizedMessage(
-"failed to send error message back to client for action [{}]",
-transportReplicaAction),
-responseException);
+logger.warn(() -> new ParameterizedMessage(
+"failed to send error message back to client for action [{}]", transportReplicaAction), responseException);
 }
 }
 
@@ -854,12 +847,9 @@ public abstract class TransportReplicationAction<
 final Throwable cause = exp.unwrapCause();
 if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException ||
 (isPrimaryAction && retryPrimaryException(cause))) {
-logger.trace(
-(org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
+logger.trace(() -> new ParameterizedMessage(
 "received an error from node [{}] for request [{}], scheduling a retry",
-node.getId(),
-requestToPerform),
-exp);
+node.getId(), requestToPerform), exp);
 retry(exp);
 } else {
 finishAsFailed(exp);
|
@ -903,9 +893,7 @@ public abstract class TransportReplicationAction<
|
||||||
void finishAsFailed(Exception failure) {
|
void finishAsFailed(Exception failure) {
|
||||||
if (finished.compareAndSet(false, true)) {
|
if (finished.compareAndSet(false, true)) {
|
||||||
setPhase(task, "failed");
|
setPhase(task, "failed");
|
||||||
logger.trace(
|
logger.trace(() -> new ParameterizedMessage("operation failed. action [{}], request [{}]", actionName, request), failure);
|
||||||
(org.apache.logging.log4j.util.Supplier<?>)
|
|
||||||
() -> new ParameterizedMessage("operation failed. action [{}], request [{}]", actionName, request), failure);
|
|
||||||
listener.onFailure(failure);
|
listener.onFailure(failure);
|
||||||
} else {
|
} else {
|
||||||
assert false : "finishAsFailed called but operation is already finished";
|
assert false : "finishAsFailed called but operation is already finished";
|
||||||
|
@@ -913,13 +901,9 @@ public abstract class TransportReplicationAction<
 }
 
 void finishWithUnexpectedFailure(Exception failure) {
-logger.warn(
-(org.apache.logging.log4j.util.Supplier<?>)
-() -> new ParameterizedMessage(
+logger.warn(() -> new ParameterizedMessage(
 "unexpected error during the primary phase for action [{}], request [{}]",
-actionName,
-request),
-failure);
+actionName, request), failure);
 if (finished.compareAndSet(false, true)) {
 setPhase(task, "failed");
 listener.onFailure(failure);
@@ -204,10 +204,8 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
 }
 
 private void onFailure(ShardRouting shardRouting, Exception e) {
-if (logger.isTraceEnabled() && e != null) {
-logger.trace(
-(org.apache.logging.log4j.util.Supplier<?>)
-() -> new ParameterizedMessage("{}: failed to execute [{}]", shardRouting, internalRequest.request()), e);
+if (e != null) {
+logger.trace(() -> new ParameterizedMessage("{}: failed to execute [{}]", shardRouting, internalRequest.request()), e);
 }
 perform(e);
 }
@@ -224,11 +222,7 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
 if (failure == null || isShardNotAvailableException(failure)) {
 failure = new NoShardAvailableActionException(null, LoggerMessageFormat.format("No shard available for [{}]", internalRequest.request()), failure);
 } else {
-if (logger.isDebugEnabled()) {
-logger.debug(
-(org.apache.logging.log4j.util.Supplier<?>)
-() -> new ParameterizedMessage("{}: failed to execute [{}]", null, internalRequest.request()), failure);
-}
+logger.debug(() -> new ParameterizedMessage("{}: failed to execute [{}]", null, internalRequest.request()), failure);
 }
 listener.onFailure(failure);
 return;
@@ -315,9 +315,7 @@ public abstract class TransportTasksAction<
 
 private void onFailure(int idx, String nodeId, Throwable t) {
 if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
-logger.debug(
-(org.apache.logging.log4j.util.Supplier<?>)
-() -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
+logger.debug(new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
 }
 
 responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.termvectors;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.TransportActions;
@@ -89,7 +88,7 @@ public class TransportShardMultiTermsVectorAction extends TransportSingleShardAc
 if (TransportActions.isShardNotAvailableException(t)) {
 throw (ElasticsearchException) t;
 } else {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), t);
+logger.debug(() -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), t);
 response.add(request.locations.get(i),
 new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(), termVectorsRequest.id(), t));
 }
@@ -21,7 +21,6 @@ package org.elasticsearch.bootstrap;
 
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.Constants;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.io.PathUtils;
@@ -428,15 +427,11 @@ final class BootstrapChecks {
 try {
 return parseProcSysVmMaxMapCount(rawProcSysVmMaxMapCount);
 } catch (final NumberFormatException e) {
-logger.warn(
-(Supplier<?>) () -> new ParameterizedMessage(
-"unable to parse vm.max_map_count [{}]",
-rawProcSysVmMaxMapCount),
-e);
+logger.warn(() -> new ParameterizedMessage("unable to parse vm.max_map_count [{}]", rawProcSysVmMaxMapCount), e);
 }
 }
 } catch (final IOException e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("I/O exception while trying to read [{}]", path), e);
+logger.warn(() -> new ParameterizedMessage("I/O exception while trying to read [{}]", path), e);
 }
 return -1;
 }
@@ -71,15 +71,12 @@ class ElasticsearchUncaughtExceptionHandler implements Thread.UncaughtExceptionH
 
 void onFatalUncaught(final String threadName, final Throwable t) {
 final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
-logger.error(
-(org.apache.logging.log4j.util.Supplier<?>)
-() -> new ParameterizedMessage("fatal error in thread [{}], exiting", threadName), t);
+logger.error(() -> new ParameterizedMessage("fatal error in thread [{}], exiting", threadName), t);
 }
 
 void onNonFatalUncaught(final String threadName, final Throwable t) {
 final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
-logger.warn((org.apache.logging.log4j.util.Supplier<?>)
-() -> new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t);
+logger.warn(() -> new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t);
 }
 
 void halt(int status) {
@@ -373,7 +373,7 @@ final class TransportClientNodesService extends AbstractComponent implements Clo
transportService.connectToNode(node);
} catch (Exception e) {
it.remove();
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to connect to discovered node [{}]", node), e);
+logger.debug(() -> new ParameterizedMessage("failed to connect to discovered node [{}]", node), e);
}
}
}

@@ -428,13 +428,10 @@ final class TransportClientNodesService extends AbstractComponent implements Clo
nodeWithInfo.getAttributes(), nodeWithInfo.getRoles(), nodeWithInfo.getVersion()));
}
} catch (ConnectTransportException e) {
-logger.debug(
-(Supplier<?>)
-() -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", listedNode), e);
+logger.debug(() -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", listedNode), e);
hostFailureListener.onNodeDisconnected(listedNode, e);
} catch (Exception e) {
-logger.info(
-(Supplier<?>) () -> new ParameterizedMessage("failed to get node info for {}, disconnecting...", listedNode), e);
+logger.info(() -> new ParameterizedMessage("failed to get node info for {}, disconnecting...", listedNode), e);
}
}

@@ -481,12 +478,10 @@ final class TransportClientNodesService extends AbstractComponent implements Clo
public void onFailure(Exception e) {
onDone();
if (e instanceof ConnectTransportException) {
-logger.debug((Supplier<?>)
-() -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", nodeToPing), e);
+logger.debug(() -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", nodeToPing), e);
hostFailureListener.onNodeDisconnected(nodeToPing, e);
} else {
-logger.info(
-(Supplier<?>) () -> new ParameterizedMessage(
+logger.info(() -> new ParameterizedMessage(
"failed to get local cluster state info for {}, disconnecting...", nodeToPing), e);
}
}

@@ -530,8 +525,7 @@ final class TransportClientNodesService extends AbstractComponent implements Clo

@Override
public void handleException(TransportException e) {
-logger.info(
-(Supplier<?>) () -> new ParameterizedMessage(
+logger.info(() -> new ParameterizedMessage(
"failed to get local cluster state for {}, disconnecting...", nodeToPing), e);
try {
hostFailureListener.onNodeDisconnected(nodeToPing, e);
@@ -19,7 +19,6 @@
package org.elasticsearch.cluster;

import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.component.AbstractLifecycleComponent;

@@ -98,7 +97,7 @@ public class NodeConnectionsService extends AbstractLifecycleComponent {
// will try again after `cluster.nodes.reconnect_interval` on all nodes but the current master.
// On the master, node fault detection will remove these nodes from the cluster as their are not
// connected. Note that it is very rare that we end up here on the master.
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to connect to {}", node), e);
+logger.warn(() -> new ParameterizedMessage("failed to connect to {}", node), e);
}

@Override

@@ -137,7 +136,7 @@ public class NodeConnectionsService extends AbstractLifecycleComponent {
try {
transportService.disconnectFromNode(node);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to disconnect to node [{}]", node), e);
+logger.warn(() -> new ParameterizedMessage("failed to disconnect to node [{}]", node), e);
}
}
}

@@ -160,9 +159,7 @@ public class NodeConnectionsService extends AbstractLifecycleComponent {
// log every 6th failure
if ((nodeFailureCount % 6) == 1) {
final int finalNodeFailureCount = nodeFailureCount;
-logger.warn(
-(Supplier<?>)
-() -> new ParameterizedMessage(
+logger.warn(() -> new ParameterizedMessage(
"failed to connect to node {} (tried [{}] times)", node, finalNodeFailureCount), e);
}
nodes.put(node, nodeFailureCount);
@@ -21,7 +21,6 @@ package org.elasticsearch.cluster.action.shard;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;

@@ -205,7 +204,7 @@ public class ShardStateAction extends AbstractComponent {

@Override
public void messageReceived(FailedShardEntry request, TransportChannel channel) throws Exception {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure);
+logger.debug(() -> new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure);
clusterService.submitStateUpdateTask(
"shard-failed",
request,

@@ -214,12 +213,12 @@ public class ShardStateAction extends AbstractComponent {
new ClusterStateTaskListener() {
@Override
public void onFailure(String source, Exception e) {
-logger.error((Supplier<?>) () -> new ParameterizedMessage("{} unexpected failure while failing shard [{}]", request.shardId, request), e);
+logger.error(() -> new ParameterizedMessage("{} unexpected failure while failing shard [{}]", request.shardId, request), e);
try {
channel.sendResponse(e);
} catch (Exception channelException) {
channelException.addSuppressed(e);
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send failure [{}] while failing shard [{}]", request.shardId, e, request), channelException);
+logger.warn(() -> new ParameterizedMessage("{} failed to send failure [{}] while failing shard [{}]", request.shardId, e, request), channelException);
}
}

@@ -229,7 +228,7 @@ public class ShardStateAction extends AbstractComponent {
try {
channel.sendResponse(new NotMasterException(source));
} catch (Exception channelException) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send no longer master while failing shard [{}]", request.shardId, request), channelException);
+logger.warn(() -> new ParameterizedMessage("{} failed to send no longer master while failing shard [{}]", request.shardId, request), channelException);
}
}

@@ -238,7 +237,7 @@ public class ShardStateAction extends AbstractComponent {
try {
channel.sendResponse(TransportResponse.Empty.INSTANCE);
} catch (Exception channelException) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send response while failing shard [{}]", request.shardId, request), channelException);
+logger.warn(() -> new ParameterizedMessage("{} failed to send response while failing shard [{}]", request.shardId, request), channelException);
}
}
}

@@ -323,7 +322,7 @@ public class ShardStateAction extends AbstractComponent {
maybeUpdatedState = applyFailedShards(currentState, failedShardsToBeApplied, staleShardsToBeApplied);
batchResultBuilder.successes(tasksToBeApplied);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to apply failed shards {}", failedShardsToBeApplied), e);
+logger.warn(() -> new ParameterizedMessage("failed to apply failed shards {}", failedShardsToBeApplied), e);
// failures are communicated back to the requester
// cluster state will not be updated in this case
batchResultBuilder.failures(tasksToBeApplied, e);

@@ -501,7 +500,7 @@ public class ShardStateAction extends AbstractComponent {
maybeUpdatedState = allocationService.applyStartedShards(currentState, shardRoutingsToBeApplied);
builder.successes(tasksToBeApplied);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to apply started shards {}", shardRoutingsToBeApplied), e);
+logger.warn(() -> new ParameterizedMessage("failed to apply started shards {}", shardRoutingsToBeApplied), e);
builder.failures(tasksToBeApplied, e);
}

@@ -510,7 +509,7 @@ public class ShardStateAction extends AbstractComponent {

@Override
public void onFailure(String source, Exception e) {
-logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
+logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
}
@@ -23,7 +23,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ResourceAlreadyExistsException;

@@ -558,9 +557,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
@Override
public void onFailure(String source, Exception e) {
if (e instanceof ResourceAlreadyExistsException) {
-logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
+logger.trace(() -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
} else {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
+logger.debug(() -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
}
super.onFailure(source, e);
}
@@ -19,7 +19,6 @@
package org.elasticsearch.cluster.metadata;

import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.analysis.Analyzer;
import org.elasticsearch.Version;
import org.elasticsearch.common.component.AbstractComponent;

@@ -208,7 +207,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
final Settings upgrade = indexScopedSettings.archiveUnknownOrInvalidSettings(
settings,
e -> logger.warn("{} ignoring unknown index setting: [{}] with value [{}]; archiving", indexMetaData.getIndex(), e.getKey(), e.getValue()),
-(e, ex) -> logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} ignoring invalid index setting: [{}] with value [{}]; archiving", indexMetaData.getIndex(), e.getKey(), e.getValue()), ex));
+(e, ex) -> logger.warn(() -> new ParameterizedMessage("{} ignoring invalid index setting: [{}] with value [{}]; archiving", indexMetaData.getIndex(), e.getKey(), e.getValue()), ex));
if (upgrade != settings) {
return IndexMetaData.builder(indexMetaData).settings(upgrade).build();
} else {
@@ -191,7 +191,7 @@ public class MetaDataMappingService extends AbstractComponent {
}
}
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to refresh-mapping in cluster state", index), e);
+logger.warn(() -> new ParameterizedMessage("[{}] failed to refresh-mapping in cluster state", index), e);
}
return dirty;
}

@@ -205,7 +205,7 @@ public class MetaDataMappingService extends AbstractComponent {
refreshTask,
ClusterStateTaskConfig.build(Priority.HIGH),
refreshExecutor,
-(source, e) -> logger.warn((Supplier<?>) () -> new ParameterizedMessage("failure during [{}]", source), e)
+(source, e) -> logger.warn(() -> new ParameterizedMessage("failure during [{}]", source), e)
);
}
@@ -20,7 +20,6 @@
package org.elasticsearch.cluster.routing;

import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;

@@ -109,16 +108,16 @@ public class RoutingService extends AbstractLifecycleComponent {
rerouting.set(false);
ClusterState state = clusterService.state();
if (logger.isTraceEnabled()) {
-logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state), e);
+logger.error(() -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state), e);
} else {
-logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e);
+logger.error(() -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e);
}
}
});
} catch (Exception e) {
rerouting.set(false);
ClusterState state = clusterService.state();
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state), e);
+logger.warn(() -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state), e);
}
}
}
@@ -316,7 +316,7 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
}

@Override
-public void onNewClusterState(final String source, final java.util.function.Supplier<ClusterState> clusterStateSupplier,
+public void onNewClusterState(final String source, final Supplier<ClusterState> clusterStateSupplier,
final ClusterStateTaskListener listener) {
Function<ClusterState, ClusterState> applyFunction = currentState -> {
ClusterState nextState = clusterStateSupplier.get();

@@ -401,7 +401,7 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
} catch (Exception e) {
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
if (logger.isTraceEnabled()) {
-logger.trace(new ParameterizedMessage(
+logger.trace(() -> new ParameterizedMessage(
"failed to execute cluster state applier in [{}], state:\nversion [{}], source [{}]\n{}{}{}",
executionTime,
previousClusterState.version(),

@@ -439,8 +439,7 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
final long version = newClusterState.version();
final String stateUUID = newClusterState.stateUUID();
final String fullState = newClusterState.toString();
-logger.warn(
-(org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
+logger.warn(() -> new ParameterizedMessage(
"failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}",
executionTime,
version,
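Two details in the ClusterApplierService hunks are easy to miss. First, with the Log4j Supplier import gone, the unqualified name Supplier can only mean java.util.function.Supplier, which is why the fully qualified parameter type in onNewClusterState can be shortened. Second, the trace call in the @@ -401 hunk previously constructed its ParameterizedMessage eagerly on every failure; wrapping it in a lambda defers construction until Log4j has checked that TRACE is enabled. A small self-contained sketch of that difference (expensiveSummary() is a hypothetical stand-in for a costly argument such as rendering a full cluster state):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;

public class EagerVsLazyTrace {
    private static final Logger logger = LogManager.getLogger(EagerVsLazyTrace.class);

    // Hypothetical stand-in for an expensive argument.
    private static String expensiveSummary() {
        return "cluster state dump...";
    }

    public static void main(String[] args) {
        // Eager: the message object and expensiveSummary() are evaluated even when TRACE is off.
        logger.trace(new ParameterizedMessage("state: {}", expensiveSummary()));
        // Lazy: the supplier runs only when TRACE is enabled.
        logger.trace(() -> new ParameterizedMessage("state: {}", expensiveSummary()));
    }
}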
@@ -21,7 +21,6 @@ package org.elasticsearch.cluster.service;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.Assertions;
import org.elasticsearch.cluster.AckedClusterStateTaskListener;
import org.elasticsearch.cluster.ClusterChangedEvent;

@@ -226,10 +225,8 @@ public class MasterService extends AbstractLifecycleComponent {
clusterStatePublisher.accept(clusterChangedEvent, taskOutputs.createAckListener(threadPool, newClusterState));
} catch (Discovery.FailedToCommitClusterStateException t) {
final long version = newClusterState.version();
-logger.warn(
-(Supplier<?>) () -> new ParameterizedMessage(
-"failing [{}]: failed to commit cluster state version [{}]", summary, version),
-t);
+logger.warn(() -> new ParameterizedMessage(
+"failing [{}]: failed to commit cluster state version [{}]", summary, version), t);
taskOutputs.publishingFailed(t);
return;
}

@@ -239,11 +236,9 @@ public class MasterService extends AbstractLifecycleComponent {
try {
taskOutputs.clusterStatePublished(clusterChangedEvent);
} catch (Exception e) {
-logger.error(
-(Supplier<?>) () -> new ParameterizedMessage(
+logger.error(() -> new ParameterizedMessage(
"exception thrown while notifying executor of new cluster state publication [{}]",
-summary),
-e);
+summary), e);
}
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
logger.debug("processing [{}]: took [{}] done publishing updated cluster state (version: {}, uuid: {})", summary,

@@ -255,8 +250,7 @@ public class MasterService extends AbstractLifecycleComponent {
final long version = newClusterState.version();
final String stateUUID = newClusterState.stateUUID();
final String fullState = newClusterState.toString();
-logger.warn(
-(Supplier<?>) () -> new ParameterizedMessage(
+logger.warn(() -> new ParameterizedMessage(
"failed to publish updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}",
executionTime,
version,

@@ -473,8 +467,7 @@ public class MasterService extends AbstractLifecycleComponent {
listener.onFailure(source, e);
} catch (Exception inner) {
inner.addSuppressed(e);
-logger.error(
-(Supplier<?>) () -> new ParameterizedMessage(
+logger.error(() -> new ParameterizedMessage(
"exception thrown by listener notifying of failure from [{}]", source), inner);
}
}

@@ -484,8 +477,7 @@ public class MasterService extends AbstractLifecycleComponent {
try {
listener.onNoLongerMaster(source);
} catch (Exception e) {
-logger.error(
-(Supplier<?>) () -> new ParameterizedMessage(
+logger.error(() -> new ParameterizedMessage(
"exception thrown by listener while notifying no longer master from [{}]", source), e);
}
}

@@ -495,12 +487,9 @@ public class MasterService extends AbstractLifecycleComponent {
try {
listener.clusterStateProcessed(source, oldState, newState);
} catch (Exception e) {
-logger.error(
-(Supplier<?>) () -> new ParameterizedMessage(
+logger.error(() -> new ParameterizedMessage(
"exception thrown by listener while notifying of cluster state processed from [{}], old cluster state:\n" +
-"{}\nnew cluster state:\n{}",
-source, oldState, newState),
-e);
+"{}\nnew cluster state:\n{}", source, oldState, newState), e);
}
}
}

@@ -614,10 +603,8 @@ public class MasterService extends AbstractLifecycleComponent {
logger.trace("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion);
} else {
this.lastFailure = e;
-logger.debug(
-(Supplier<?>) () -> new ParameterizedMessage(
-"ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion),
-e);
+logger.debug(() -> new ParameterizedMessage(
+"ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion), e);
}

if (countDown.countDown()) {

@@ -650,7 +637,7 @@ public class MasterService extends AbstractLifecycleComponent {
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
if (logger.isTraceEnabled()) {
logger.trace(
-(Supplier<?>) () -> new ParameterizedMessage(
+() -> new ParameterizedMessage(
"failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}",
executionTime,
previousClusterState.version(),
@@ -21,7 +21,6 @@ package org.elasticsearch.common.lucene;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.codecs.CodecUtil;

@@ -111,7 +110,7 @@ public class Lucene {
try {
return Version.parse(version);
} catch (ParseException e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("no version match {}, default to {}", version, defaultVersion), e);
+logger.warn(() -> new ParameterizedMessage("no version match {}, default to {}", version, defaultVersion), e);
return defaultVersion;
}
}
@@ -20,7 +20,6 @@
package org.elasticsearch.common.settings;

import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.spell.LevensteinDistance;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ExceptionsHelper;

@@ -135,7 +134,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
settingUpdater.getValue(current, previous);
} catch (RuntimeException ex) {
exceptions.add(ex);
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex);
+logger.debug(() -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex);
}
}
// here we are exhaustive and record all settings that failed.

@@ -163,8 +162,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
try {
applyRunnables.add(settingUpdater.updater(current, previous));
} catch (Exception ex) {
-logger.warn(
-(Supplier<?>) () -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex);
+logger.warn(() -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex);
throw ex;
}
}
@@ -21,7 +21,6 @@ package org.elasticsearch.common.util;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.logging.Loggers;

@@ -67,7 +66,7 @@ public class IndexFolderUpgrader {
} catch (NoSuchFileException | FileNotFoundException exception) {
// thrown when the source is non-existent because the folder was renamed
// by another node (shared FS) after we checked if the target exists
-logger.error((Supplier<?>) () -> new ParameterizedMessage("multiple nodes trying to upgrade [{}] in parallel, retry " +
+logger.error(() -> new ParameterizedMessage("multiple nodes trying to upgrade [{}] in parallel, retry " +
"upgrading with single node", target), exception);
throw exception;
} finally {
@@ -21,7 +21,6 @@ package org.elasticsearch.common.util.concurrent;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;

public class LoggingRunnable implements Runnable {

@@ -38,7 +37,7 @@ public class LoggingRunnable implements Runnable {
try {
runnable.run();
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to execute [{}]", runnable.toString()), e);
+logger.warn(() -> new ParameterizedMessage("failed to execute [{}]", runnable.toString()), e);
}
}
@@ -20,7 +20,6 @@ package org.elasticsearch.discovery;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.logging.ESLoggerFactory;

@@ -70,7 +69,7 @@ public class AckClusterStatePublishResponseHandler extends BlockingClusterStateP
ackListener.onNodeAck(node, e);
} catch (Exception inner) {
inner.addSuppressed(e);
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("error while processing ack for node [{}]", node), inner);
+logger.debug(() -> new ParameterizedMessage("error while processing ack for node [{}]", node), inner);
}
}
}
@@ -76,11 +76,7 @@ public class SingleNodeDiscovery extends AbstractLifecycleComponent implements D
public void onFailure(String source, Exception e) {
latch.countDown();
ackListener.onNodeAck(transportService.getLocalNode(), e);
-logger.warn(
-(org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
-"failed while applying cluster state locally [{}]",
-event.source()),
-e);
+logger.warn(() -> new ParameterizedMessage("failed while applying cluster state locally [{}]", event.source()), e);
}
};
clusterApplier.onNewClusterState("apply-locally-on-node[" + event.source() + "]", () -> clusterState, listener);
@@ -20,7 +20,6 @@
package org.elasticsearch.discovery.zen;

import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;

@@ -270,13 +269,9 @@ public class MasterFaultDetection extends FaultDetection {
}

int retryCount = ++MasterFaultDetection.this.retryCount;
-logger.trace(
-(Supplier<?>) () -> new ParameterizedMessage(
+logger.trace(() -> new ParameterizedMessage(
"[master] failed to ping [{}], retry [{}] out of [{}]",
-masterNode,
-retryCount,
-pingRetryCount),
-exp);
+masterNode, retryCount, pingRetryCount), exp);
if (retryCount >= pingRetryCount) {
logger.debug("[master] failed to ping [{}], tried [{}] times, each with maximum [{}] timeout",
masterNode, pingRetryCount, pingRetryTimeout);
@@ -20,7 +20,6 @@ package org.elasticsearch.discovery.zen;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.store.AlreadyClosedException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;

@@ -364,7 +363,7 @@ public class NodeJoinController extends AbstractComponent {
try {
callback.onFailure(e);
} catch (Exception inner) {
-logger.error((Supplier<?>) () -> new ParameterizedMessage("error handling task failure [{}]", e), inner);
+logger.error(() -> new ParameterizedMessage("error handling task failure [{}]", e), inner);
}
}
}

@@ -375,7 +374,7 @@ public class NodeJoinController extends AbstractComponent {
try {
callback.onSuccess();
} catch (Exception e) {
-logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected error during [{}]", source), e);
+logger.error(() -> new ParameterizedMessage("unexpected error during [{}]", source), e);
}
}
}
@@ -20,7 +20,6 @@
package org.elasticsearch.discovery.zen;

import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;

@@ -177,12 +176,8 @@ public class NodesFaultDetection extends FaultDetection {
}
});
} catch (EsRejectedExecutionException ex) {
-logger.trace(
-(Supplier<?>) () -> new ParameterizedMessage(
-"[node ] [{}] ignoring node failure (reason [{}]). Local node is shutting down",
-node,
-reason),
-ex);
+logger.trace(() -> new ParameterizedMessage(
+"[node ] [{}] ignoring node failure (reason [{}]). Local node is shutting down", node, reason), ex);
}
}

@@ -247,13 +242,8 @@ public class NodesFaultDetection extends FaultDetection {
}

retryCount++;
-logger.trace(
-(Supplier<?>) () -> new ParameterizedMessage(
-"[node ] failed to ping [{}], retry [{}] out of [{}]",
-node,
-retryCount,
-pingRetryCount),
-exp);
+logger.trace( () -> new ParameterizedMessage(
+"[node ] failed to ping [{}], retry [{}] out of [{}]", node, retryCount, pingRetryCount), exp);
if (retryCount >= pingRetryCount) {
logger.debug("[node ] failed to ping [{}], tried [{}] times, each with maximum [{}] timeout", node,
pingRetryCount, pingRetryTimeout);
@@ -247,9 +247,7 @@ public class PublishClusterStateAction extends AbstractComponent {
bytes = serializeFullClusterState(clusterState, node.getVersion());
serializedStates.put(node.getVersion(), bytes);
} catch (Exception e) {
-logger.warn(
-(org.apache.logging.log4j.util.Supplier<?>) () ->
-new ParameterizedMessage("failed to serialize cluster_state before publishing it to node {}", node), e);
+logger.warn(() -> new ParameterizedMessage("failed to serialize cluster_state before publishing it to node {}", node), e);
sendingController.onNodeSendFailed(node, e);
return;
}

@@ -297,16 +295,13 @@ public class PublishClusterStateAction extends AbstractComponent {
logger.debug("resending full cluster state to node {} reason {}", node, exp.getDetailedMessage());
sendFullClusterState(clusterState, serializedStates, node, publishTimeout, sendingController);
} else {
-logger.debug((org.apache.logging.log4j.util.Supplier<?>) () ->
-new ParameterizedMessage("failed to send cluster state to {}", node), exp);
+logger.debug(() -> new ParameterizedMessage("failed to send cluster state to {}", node), exp);
sendingController.onNodeSendFailed(node, exp);
}
}
});
} catch (Exception e) {
-logger.warn(
-(org.apache.logging.log4j.util.Supplier<?>) () ->
-new ParameterizedMessage("error sending cluster state to {}", node), e);
+logger.warn(() -> new ParameterizedMessage("error sending cluster state to {}", node), e);
sendingController.onNodeSendFailed(node, e);
}
}

@@ -333,15 +328,13 @@ public class PublishClusterStateAction extends AbstractComponent {

@Override
public void handleException(TransportException exp) {
-logger.debug((org.apache.logging.log4j.util.Supplier<?>) () ->
-new ParameterizedMessage("failed to commit cluster state (uuid [{}], version [{}]) to {}",
+logger.debug(() -> new ParameterizedMessage("failed to commit cluster state (uuid [{}], version [{}]) to {}",
clusterState.stateUUID(), clusterState.version(), node), exp);
sendingController.getPublishResponseHandler().onFailure(node, exp);
}
});
} catch (Exception t) {
-logger.warn((org.apache.logging.log4j.util.Supplier<?>) () ->
-new ParameterizedMessage("error sending cluster state commit (uuid [{}], version [{}]) to {}",
+logger.warn(() -> new ParameterizedMessage("error sending cluster state commit (uuid [{}], version [{}]) to {}",
clusterState.stateUUID(), clusterState.version(), node), t);
sendingController.getPublishResponseHandler().onFailure(node, t);
}

@@ -616,7 +609,7 @@ public class PublishClusterStateAction extends AbstractComponent {
if (committedOrFailed()) {
return committed == false;
}
-logger.trace((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("failed to commit version [{}]. {}",
+logger.trace(() -> new ParameterizedMessage("failed to commit version [{}]. {}",
clusterState.version(), details), reason);
committed = false;
committedOrFailedLatch.countDown();
@@ -22,7 +22,6 @@ package org.elasticsearch.discovery.zen;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.store.AlreadyClosedException;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.Version;

@@ -513,18 +512,13 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
public void onFailure(Exception e) {
if (e instanceof ConnectTransportException || e instanceof AlreadyClosedException) {
// can't connect to the node - this is more common path!
-logger.trace(
-(Supplier<?>) () -> new ParameterizedMessage(
-"[{}] failed to ping {}", pingingRound.id(), node), e);
+logger.trace(() -> new ParameterizedMessage("[{}] failed to ping {}", pingingRound.id(), node), e);
} else if (e instanceof RemoteTransportException) {
// something went wrong on the other side
-logger.debug(
-(Supplier<?>) () -> new ParameterizedMessage(
+logger.debug(() -> new ParameterizedMessage(
"[{}] received a remote error as a response to ping {}", pingingRound.id(), node), e);
} else {
-logger.warn(
-(Supplier<?>) () -> new ParameterizedMessage(
-"[{}] failed send ping to {}", pingingRound.id(), node), e);
+logger.warn(() -> new ParameterizedMessage("[{}] failed send ping to {}", pingingRound.id(), node), e);
}
}

@@ -574,9 +568,9 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
if (exp instanceof ConnectTransportException || exp.getCause() instanceof ConnectTransportException ||
exp.getCause() instanceof AlreadyClosedException) {
// ok, not connected...
-logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to connect to {}", node), exp);
+logger.trace(() -> new ParameterizedMessage("failed to connect to {}", node), exp);
} else if (closed == false) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to send ping to [{}]", node), exp);
+logger.warn(() -> new ParameterizedMessage("failed to send ping to [{}]", node), exp);
}
}
};
@@ -291,7 +291,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
try {
membership.sendLeaveRequestBlocking(nodes.getMasterNode(), nodes.getLocalNode(), TimeValue.timeValueSeconds(1));
} catch (Exception e) {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to send leave request to master [{}]", nodes.getMasterNode()), e);
+logger.debug(() -> new ParameterizedMessage("failed to send leave request to master [{}]", nodes.getMasterNode()), e);
}
} else {
// we're master -> let other potential master we left and start a master election now rather then wait for masterFD

@@ -303,7 +303,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
try {
membership.sendLeaveRequest(nodes.getLocalNode(), possibleMaster);
} catch (Exception e) {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to send leave request from master [{}] to possible master [{}]", nodes.getMasterNode(), possibleMaster), e);
+logger.debug(() -> new ParameterizedMessage("failed to send leave request from master [{}] to possible master [{}]", nodes.getMasterNode(), possibleMaster), e);
}
}
}

@@ -367,11 +367,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
processedOrFailed.set(true);
latch.countDown();
ackListener.onNodeAck(localNode, e);
-logger.warn(
-(org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
-"failed while applying cluster state locally [{}]",
-clusterChangedEvent.source()),
-e);
+logger.warn(() -> new ParameterizedMessage(
+"failed while applying cluster state locally [{}]", clusterChangedEvent.source()), e);
}
});

@@ -393,11 +390,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
try {
latch.await();
} catch (InterruptedException e) {
-logger.debug(
-(org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
-"interrupted while applying cluster state locally [{}]",
-clusterChangedEvent.source()),
-e);
+logger.debug(() -> new ParameterizedMessage(
+"interrupted while applying cluster state locally [{}]", clusterChangedEvent.source()), e);
Thread.currentThread().interrupt();
}
}

@@ -514,7 +508,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
// first, make sure we can connect to the master
transportService.connectToNode(masterNode);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to connect to master [{}], retrying...", masterNode), e);
+logger.warn(() -> new ParameterizedMessage("failed to connect to master [{}], retrying...", masterNode), e);
return false;
}
int joinAttempt = 0; // we retry on illegal state if the master is not yet ready

@@ -534,7 +528,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
}
} else {
if (logger.isTraceEnabled()) {
-logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to send join request to master [{}]", masterNode), e);
+logger.trace(() -> new ParameterizedMessage("failed to send join request to master [{}]", masterNode), e);
} else {
logger.info("failed to send join request to master [{}], reason [{}]", masterNode, ExceptionsHelper.detailedMessage(e));
}

@@ -646,7 +640,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover

@Override
public void onFailure(final String source, final Exception e) {
-logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
+logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
}

@Override

@@ -718,7 +712,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
return;
}

-logger.info((Supplier<?>) () -> new ParameterizedMessage("master_left [{}], reason [{}]", masterNode, reason), cause);
+logger.info(() -> new ParameterizedMessage("master_left [{}], reason [{}]", masterNode, reason), cause);

synchronized (stateMutex) {
if (localNodeMaster() == false && masterNode.equals(committedState.get().nodes().getMasterNode())) {

@@ -764,7 +758,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
pendingStatesQueue.markAsFailed(newClusterState, e);
} catch (Exception inner) {
inner.addSuppressed(e);
-logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected exception while failing [{}]", reason), inner);
+logger.error(() -> new ParameterizedMessage("unexpected exception while failing [{}]", reason), inner);
}
return false;
}

@@ -807,14 +801,14 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover

@Override
public void onFailure(String source, Exception e) {
-logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure applying [{}]", reason), e);
+logger.error(() -> new ParameterizedMessage("unexpected failure applying [{}]", reason), e);
try {
// TODO: use cluster state uuid instead of full cluster state so that we don't keep reference to CS around
// for too long.
pendingStatesQueue.markAsFailed(newClusterState, e);
} catch (Exception inner) {
inner.addSuppressed(e);
-logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected exception while failing [{}]", reason), inner);
+logger.error(() -> new ParameterizedMessage("unexpected exception while failing [{}]", reason), inner);
}
}
});

@@ -880,7 +874,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
@ -880,7 +874,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
|
||||||
try {
|
try {
|
||||||
membership.sendValidateJoinRequestBlocking(node, state, joinTimeout);
|
membership.sendValidateJoinRequestBlocking(node, state, joinTimeout);
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to validate incoming join request from node [{}]", node),
|
logger.warn(() -> new ParameterizedMessage("failed to validate incoming join request from node [{}]", node),
|
||||||
e);
|
e);
|
||||||
callback.onFailure(new IllegalStateException("failure when sending a validation request to node", e));
|
callback.onFailure(new IllegalStateException("failure when sending a validation request to node", e));
|
||||||
return;
|
return;
|
||||||
|
@ -1029,11 +1023,11 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void handleException(TransportException exp) {
|
public void handleException(TransportException exp) {
|
||||||
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), exp);
|
logger.warn(() -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), exp);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), e);
|
logger.warn(() -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), e);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
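Every hunk above makes the same mechanical change: the explicit (Supplier<?>) cast in front of the message lambda is dropped and the lambda is passed to the logger directly. The supplier-style overloads exist so the ParameterizedMessage is only constructed when the log level is actually enabled. A minimal, self-contained sketch of the resulting idiom (class and variable names here are illustrative, not taken from the commit):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;

public class LazyLoggingExample {
    private static final Logger logger = LogManager.getLogger(LazyLoggingExample.class);

    public static void main(String[] args) {
        String source = "zen-disco-node-join";
        Exception e = new IllegalStateException("boom");
        // The lambda defers message construction: the ParameterizedMessage is
        // only allocated if the error level is enabled for this logger.
        logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
    }
}
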
@@ -21,7 +21,6 @@ package org.elasticsearch.env;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.Directory;

@@ -218,8 +217,8 @@ public final class NodeEnvironment implements Closeable {
}

} catch (IOException e) {
-startupTraceLogger.trace(
-(Supplier<?>) () -> new ParameterizedMessage("failed to obtain node lock on {}", dir.toAbsolutePath()), e);
+startupTraceLogger.trace(() -> new ParameterizedMessage(
+"failed to obtain node lock on {}", dir.toAbsolutePath()), e);
lastException = new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e);
// release all the ones that were obtained up until now
releaseAndNullLocks(locks);

@@ -898,7 +897,7 @@ public final class NodeEnvironment implements Closeable {
logger.trace("releasing lock [{}]", lock);
lock.close();
} catch (IOException e) {
-logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to release lock [{}]", lock), e);
+logger.trace(() -> new ParameterizedMessage("failed to release lock [{}]", lock), e);
}
}
}

@@ -21,7 +21,6 @@ package org.elasticsearch.gateway;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;

@@ -217,7 +216,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
unwrappedCause instanceof ElasticsearchTimeoutException) {
nodeEntry.restartFetching();
} else {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("{}: failed to list shard for {} on node [{}]",
+logger.warn(() -> new ParameterizedMessage("{}: failed to list shard for {} on node [{}]",
shardId, type, failure.nodeId()), failure);
nodeEntry.doneFetching(failure.getCause());
}

@@ -128,9 +128,7 @@ public class Gateway extends AbstractComponent {
}
} catch (Exception e) {
final Index electedIndex = electedIndexMetaData.getIndex();
-logger.warn(
-(org.apache.logging.log4j.util.Supplier<?>)
-() -> new ParameterizedMessage("recovering index {} failed - recovering as closed", electedIndex), e);
+logger.warn(() -> new ParameterizedMessage("recovering index {} failed - recovering as closed", electedIndex), e);
electedIndexMetaData = IndexMetaData.builder(electedIndexMetaData).state(IndexMetaData.State.CLOSE).build();
}

@@ -159,13 +157,8 @@ public class Gateway extends AbstractComponent {
}

private void logInvalidSetting(String settingType, Map.Entry<String, String> e, IllegalArgumentException ex) {
-logger.warn(
-(org.apache.logging.log4j.util.Supplier<?>)
-() -> new ParameterizedMessage("ignoring invalid {} setting: [{}] with value [{}]; archiving",
-settingType,
-e.getKey(),
-e.getValue()),
-ex);
+logger.warn(() -> new ParameterizedMessage("ignoring invalid {} setting: [{}] with value [{}]; archiving",
+settingType, e.getKey(), e.getValue()), ex);
}

public interface GatewayStateRecoveredListener {

@@ -21,7 +21,6 @@ package org.elasticsearch.gateway;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;

@@ -283,7 +282,7 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste

@Override
public void onFailure(String source, Exception e) {
-logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
+logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
GatewayRecoveryListener.this.onFailure("failed to updated cluster state");
}

@@ -20,7 +20,6 @@
package org.elasticsearch.gateway;

import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;

@@ -158,7 +157,7 @@ public class LocalAllocateDangledIndices extends AbstractComponent {
minIndexCompatibilityVersion);
} catch (Exception ex) {
// upgrade failed - adding index as closed
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("found dangled index [{}] on node [{}]. This index cannot be upgraded to the latest version, adding as closed", indexMetaData.getIndex(), request.fromNode), ex);
+logger.warn(() -> new ParameterizedMessage("found dangled index [{}] on node [{}]. This index cannot be upgraded to the latest version, adding as closed", indexMetaData.getIndex(), request.fromNode), ex);
upgradedIndexMetaData = IndexMetaData.builder(indexMetaData).state(IndexMetaData.State.CLOSE).version(indexMetaData.getVersion() + 1).build();
}
metaData.put(upgradedIndexMetaData, false);

@@ -183,7 +182,7 @@ public class LocalAllocateDangledIndices extends AbstractComponent {

@Override
public void onFailure(String source, Exception e) {
-logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
+logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
try {
channel.sendResponse(e);
} catch (Exception inner) {

@@ -20,7 +20,6 @@ package org.elasticsearch.gateway;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFormatTooNewException;

@@ -323,8 +322,7 @@ public abstract class MetaDataStateFormat<T> {
return state;
} catch (Exception e) {
exceptions.add(new IOException("failed to read " + pathAndStateId.toString(), e));
-logger.debug(
-(Supplier<?>) () -> new ParameterizedMessage(
+logger.debug(() -> new ParameterizedMessage(
"{}: failed to read [{}], ignoring...", pathAndStateId.file.toAbsolutePath(), prefix), e);
}
}

@@ -20,7 +20,6 @@
package org.elasticsearch.gateway;

import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.Nullable;

@@ -125,7 +124,7 @@ public class MetaStateService extends AbstractComponent {
IndexMetaData.FORMAT.write(indexMetaData,
nodeEnv.indexPaths(indexMetaData.getIndex()));
} catch (Exception ex) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}]: failed to write index state", index), ex);
+logger.warn(() -> new ParameterizedMessage("[{}]: failed to write index state", index), ex);
throw new IOException("failed to write state for [" + index + "]", ex);
}
}

@@ -21,7 +21,6 @@ package org.elasticsearch.gateway;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RecoverySource;

@@ -259,9 +258,9 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
} else {
final String finalAllocationId = allocationId;
if (nodeShardState.storeException() instanceof ShardLockObtainFailedException) {
-logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened as it's locked, treating as valid shard", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException());
+logger.trace(() -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened as it's locked, treating as valid shard", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException());
} else {
-logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException());
+logger.trace(() -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException());
allocationId = null;
}
}

@@ -20,7 +20,6 @@
package org.elasticsearch.gateway;

import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;

@@ -146,8 +145,7 @@ public class TransportNodesListGatewayStartedShards extends
Store.tryOpenIndex(shardPath.resolveIndex(), shardId, nodeEnv::shardLock, logger);
} catch (Exception exception) {
final ShardPath finalShardPath = shardPath;
-logger.trace(
-(Supplier<?>) () -> new ParameterizedMessage(
+logger.trace(() -> new ParameterizedMessage(
"{} can't open index for shard [{}] in path [{}]",
shardId,
shardStateMetaData,

@@ -21,7 +21,6 @@ package org.elasticsearch.index;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.Loggers;

@@ -61,7 +60,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.shardRoutingChanged(indexShard, oldRouting, newRouting);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke shard touring changed callback", indexShard.shardId().getId()), e);
+logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke shard touring changed callback", indexShard.shardId().getId()), e);
}
}
}

@@ -72,7 +71,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.afterIndexShardCreated(indexShard);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke after shard created callback", indexShard.shardId().getId()), e);
+logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard created callback", indexShard.shardId().getId()), e);
throw e;
}
}

@@ -84,7 +83,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.afterIndexShardStarted(indexShard);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke after shard started callback", indexShard.shardId().getId()), e);
+logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard started callback", indexShard.shardId().getId()), e);
throw e;
}
}

@@ -97,7 +96,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.beforeIndexShardClosed(shardId, indexShard, indexSettings);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke before shard closed callback", shardId.getId()), e);
+logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard closed callback", shardId.getId()), e);
throw e;
}
}

@@ -110,7 +109,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.afterIndexShardClosed(shardId, indexShard, indexSettings);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke after shard closed callback", shardId.getId()), e);
+logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard closed callback", shardId.getId()), e);
throw e;
}
}

@@ -122,7 +121,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.onShardInactive(indexShard);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke on shard inactive callback", indexShard.shardId().getId()), e);
+logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke on shard inactive callback", indexShard.shardId().getId()), e);
throw e;
}
}

@@ -134,7 +133,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.indexShardStateChanged(indexShard, previousState, indexShard.state(), reason);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke index shard state changed callback", indexShard.shardId().getId()), e);
+logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke index shard state changed callback", indexShard.shardId().getId()), e);
throw e;
}
}

@@ -170,7 +169,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.beforeIndexShardCreated(shardId, indexSettings);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke before shard created callback", shardId), e);
+logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard created callback", shardId), e);
throw e;
}
}

@@ -207,7 +206,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.beforeIndexShardDeleted(shardId, indexSettings);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke before shard deleted callback", shardId.getId()), e);
+logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard deleted callback", shardId.getId()), e);
throw e;
}
}

@@ -220,7 +219,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.afterIndexShardDeleted(shardId, indexSettings);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to invoke after shard deleted callback", shardId.getId()), e);
+logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard deleted callback", shardId.getId()), e);
throw e;
}
}

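The before/after shapes, side by side: the cast only forced which logger overload was chosen, and the historical presence of the cast suggests older toolchains or Log4j versions found the bare call ambiguous. A hedged illustration (not code from the commit; the method names are invented for the example):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class CastRemovalExample {
    private static final Logger logger = LogManager.getLogger(CastRemovalExample.class);

    // Old style: the cast pins the lambda to the Supplier<?> overload, which
    // also keeps the org.apache.logging.log4j.util.Supplier import alive.
    static void before(String reason, Exception e) {
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] listener failed", reason), e);
    }

    // New style: the bare lambda is handed to overload resolution, and the
    // Supplier import can be deleted, as each file hunk above does.
    static void after(String reason, Exception e) {
        logger.warn(() -> new ParameterizedMessage("[{}] listener failed", reason), e);
    }
}
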
@@ -431,8 +431,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
final boolean flushEngine = deleted.get() == false && closed.get();
indexShard.close(reason, flushEngine);
} catch (Exception e) {
-logger.debug((org.apache.logging.log4j.util.Supplier<?>)
-() -> new ParameterizedMessage("[{}] failed to close index shard", shardId), e);
+logger.debug(() -> new ParameterizedMessage("[{}] failed to close index shard", shardId), e);
// ignore
}
}

@@ -20,7 +20,6 @@
package org.elasticsearch.index;

import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.DirectoryReader;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;

@@ -154,9 +153,7 @@ public final class IndexWarmer extends AbstractComponent {
indexShard
.warmerService()
.logger()
-.warn(
-(Supplier<?>) () -> new ParameterizedMessage(
-"failed to warm-up global ordinals for [{}]", fieldType.name()), e);
+.warn(() -> new ParameterizedMessage("failed to warm-up global ordinals for [{}]", fieldType.name()), e);
} finally {
latch.countDown();
}

@@ -20,7 +20,6 @@
package org.elasticsearch.index.cache.bitset;

import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;

@@ -263,7 +262,7 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements I
indexShard.warmerService().logger().trace("warmed bitset for [{}], took [{}]", filterToWarm, TimeValue.timeValueNanos(System.nanoTime() - start));
}
} catch (Exception e) {
-indexShard.warmerService().logger().warn((Supplier<?>) () -> new ParameterizedMessage("failed to load bitset for [{}]", filterToWarm), e);
+indexShard.warmerService().logger().warn(() -> new ParameterizedMessage("failed to load bitset for [{}]", filterToWarm), e);
} finally {
latch.countDown();
}

@@ -21,7 +21,6 @@ package org.elasticsearch.index.engine;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexFileNames;

@@ -597,7 +596,7 @@ public abstract class Engine implements Closeable {
try {
directory = engineConfig.getCodec().compoundFormat().getCompoundReader(segmentReader.directory(), segmentCommitInfo.info, IOContext.READ);
} catch (IOException e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("Error when opening compound reader for Directory [{}] and SegmentCommitInfo [{}]", segmentReader.directory(), segmentCommitInfo), e);
+logger.warn(() -> new ParameterizedMessage("Error when opening compound reader for Directory [{}] and SegmentCommitInfo [{}]", segmentReader.directory(), segmentCommitInfo), e);

return ImmutableOpenMap.of();
}

@@ -613,15 +612,14 @@ public abstract class Engine implements Closeable {
files = directory.listAll();
} catch (IOException e) {
final Directory finalDirectory = directory;
-logger.warn(
-(Supplier<?>) () -> new ParameterizedMessage("Couldn't list Compound Reader Directory [{}]", finalDirectory), e);
+logger.warn(() -> new ParameterizedMessage("Couldn't list Compound Reader Directory [{}]", finalDirectory), e);
return ImmutableOpenMap.of();
}
} else {
try {
files = segmentReader.getSegmentInfo().files().toArray(new String[]{});
} catch (IOException e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("Couldn't list Directory from SegmentReader [{}] and SegmentInfo [{}]", segmentReader, segmentReader.getSegmentInfo()), e);
+logger.warn(() -> new ParameterizedMessage("Couldn't list Directory from SegmentReader [{}] and SegmentInfo [{}]", segmentReader, segmentReader.getSegmentInfo()), e);
return ImmutableOpenMap.of();
}
}

@@ -634,13 +632,10 @@ public abstract class Engine implements Closeable {
length = directory.fileLength(file);
} catch (NoSuchFileException | FileNotFoundException e) {
final Directory finalDirectory = directory;
-logger.warn((Supplier<?>)
-() -> new ParameterizedMessage("Tried to query fileLength but file is gone [{}] [{}]", finalDirectory, file), e);
+logger.warn(() -> new ParameterizedMessage("Tried to query fileLength but file is gone [{}] [{}]", finalDirectory, file), e);
} catch (IOException e) {
final Directory finalDirectory = directory;
-logger.warn(
-(Supplier<?>)
-() -> new ParameterizedMessage("Error when trying to query fileLength [{}] [{}]", finalDirectory, file), e);
+logger.warn(() -> new ParameterizedMessage("Error when trying to query fileLength [{}] [{}]", finalDirectory, file), e);
}
if (length == 0L) {
continue;

@@ -653,9 +648,7 @@ public abstract class Engine implements Closeable {
directory.close();
} catch (IOException e) {
final Directory finalDirectory = directory;
-logger.warn(
-(Supplier<?>)
-() -> new ParameterizedMessage("Error when closing compound reader on Directory [{}]", finalDirectory), e);
+logger.warn(() -> new ParameterizedMessage("Error when closing compound reader on Directory [{}]", finalDirectory), e);
}
}

@@ -706,7 +699,7 @@ public abstract class Engine implements Closeable {
try {
segment.sizeInBytes = info.sizeInBytes();
} catch (IOException e) {
-logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
+logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
}
segments.put(info.info.name, segment);
} else {

@@ -732,7 +725,7 @@ public abstract class Engine implements Closeable {
try {
segment.sizeInBytes = info.sizeInBytes();
} catch (IOException e) {
-logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
+logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
}
segment.memoryInBytes = segmentReader.ramBytesUsed();
segment.segmentSort = info.info.getIndexSort();

@@ -880,7 +873,7 @@ public abstract class Engine implements Closeable {
store.incRef();
try {
if (failedEngine.get() != null) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("tried to fail engine but engine is already failed. ignoring. [{}]", reason), failure);
+logger.warn(() -> new ParameterizedMessage("tried to fail engine but engine is already failed. ignoring. [{}]", reason), failure);
return;
}
// this must happen before we close IW or Translog such that we can check this state to opt out of failing the engine

@@ -890,7 +883,7 @@ public abstract class Engine implements Closeable {
// we just go and close this engine - no way to recover
closeNoLock("engine failed on: [" + reason + "]", closedLatch);
} finally {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed engine [{}]", reason), failure);
+logger.warn(() -> new ParameterizedMessage("failed engine [{}]", reason), failure);
// we must set a failure exception, generate one if not supplied
// we first mark the store as corrupted before we notify any listeners
// this must happen first otherwise we might try to reallocate so quickly

@@ -913,7 +906,7 @@ public abstract class Engine implements Closeable {
store.decRef();
}
} else {
-logger.debug((Supplier<?>) () -> new ParameterizedMessage("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", reason), failure);
+logger.debug(() -> new ParameterizedMessage("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", reason), failure);
}
}

@@ -21,7 +21,6 @@ package org.elasticsearch.index.engine;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexReader;

@@ -1808,7 +1807,7 @@ public class InternalEngine extends Engine {
throw ex;
} catch (Exception ex) {
ensureOpen(ex); // throw EngineCloseException here if we are already closed
-logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to acquire searcher, source {}", source), ex);
+logger.error(() -> new ParameterizedMessage("failed to acquire searcher, source {}", source), ex);
throw new EngineException(shardId, "failed to acquire searcher, source " + source, ex);
} finally {
Releasables.close(releasable);

@@ -220,7 +220,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
// only update entries if needed
updatedEntries = internalMerge(indexMetaData, MergeReason.MAPPING_RECOVERY, true);
} catch (Exception e) {
-logger.warn((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("[{}] failed to apply mappings", index()), e);
+logger.warn(() -> new ParameterizedMessage("[{}] failed to apply mappings", index()), e);
throw e;
}

@@ -21,7 +21,6 @@ package org.elasticsearch.index.reindex;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BackoffPolicy;

@@ -106,7 +105,7 @@ public class ClientScrollableHitSource extends ScrollableHitSource {

@Override
public void onFailure(Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), e);
+logger.warn(() -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), e);
onCompletion.run();
}
});

@@ -155,12 +154,11 @@ public class ClientScrollableHitSource extends ScrollableHitSource {
if (retries.hasNext()) {
retryCount += 1;
TimeValue delay = retries.next();
-logger.trace((Supplier<?>) () -> new ParameterizedMessage("retrying rejected search after [{}]", delay), e);
+logger.trace(() -> new ParameterizedMessage("retrying rejected search after [{}]", delay), e);
countSearchRetry.run();
threadPool.schedule(delay, ThreadPool.Names.SAME, retryWithContext);
} else {
-logger.warn(
-(Supplier<?>) () -> new ParameterizedMessage(
+logger.warn(() -> new ParameterizedMessage(
"giving up on search because we retried [{}] times without success", retryCount), e);
fail.accept(e);
}

@@ -880,8 +880,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
try {
sizeInBytes += info.sizeInBytes();
} catch (IOException e) {
-logger.trace((org.apache.logging.log4j.util.Supplier<?>)
-() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
+logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
}
}
}

@@ -20,7 +20,6 @@ package org.elasticsearch.index.shard;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.index.engine.Engine;

import java.util.List;

@@ -94,7 +93,7 @@ public interface IndexingOperationListener {
try {
listener.preIndex(shardId, operation);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("preIndex listener [{}] failed", listener), e);
+logger.warn(() -> new ParameterizedMessage("preIndex listener [{}] failed", listener), e);
}
}
return operation;

@@ -107,7 +106,7 @@ public interface IndexingOperationListener {
try {
listener.postIndex(shardId, index, result);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("postIndex listener [{}] failed", listener), e);
+logger.warn(() -> new ParameterizedMessage("postIndex listener [{}] failed", listener), e);
}
}
}

@@ -120,7 +119,7 @@ public interface IndexingOperationListener {
listener.postIndex(shardId, index, ex);
} catch (Exception inner) {
inner.addSuppressed(ex);
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("postIndex listener [{}] failed", listener), inner);
+logger.warn(() -> new ParameterizedMessage("postIndex listener [{}] failed", listener), inner);
}
}
}

@@ -132,7 +131,7 @@ public interface IndexingOperationListener {
try {
listener.preDelete(shardId, delete);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("preDelete listener [{}] failed", listener), e);
+logger.warn(() -> new ParameterizedMessage("preDelete listener [{}] failed", listener), e);
}
}
return delete;

@@ -145,7 +144,7 @@ public interface IndexingOperationListener {
try {
listener.postDelete(shardId, delete, result);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("postDelete listener [{}] failed", listener), e);
+logger.warn(() -> new ParameterizedMessage("postDelete listener [{}] failed", listener), e);
}
}
}

@@ -158,7 +157,7 @@ public interface IndexingOperationListener {
listener.postDelete(shardId, delete, ex);
} catch (Exception inner) {
inner.addSuppressed(ex);
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("postDelete listener [{}] failed", listener), inner);
+logger.warn(() -> new ParameterizedMessage("postDelete listener [{}] failed", listener), inner);
}
}
}

@@ -20,7 +20,6 @@ package org.elasticsearch.index.shard;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.transport.TransportRequest;

@@ -133,7 +132,7 @@ public interface SearchOperationListener {
try {
listener.onPreQueryPhase(searchContext);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("onPreQueryPhase listener [{}] failed", listener), e);
+logger.warn(() -> new ParameterizedMessage("onPreQueryPhase listener [{}] failed", listener), e);
}
}
}

@@ -144,7 +143,7 @@ public interface SearchOperationListener {
try {
listener.onFailedQueryPhase(searchContext);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("onFailedQueryPhase listener [{}] failed", listener), e);
+logger.warn(() -> new ParameterizedMessage("onFailedQueryPhase listener [{}] failed", listener), e);
}
}
}

@@ -155,7 +154,7 @@ public interface SearchOperationListener {
try {
listener.onQueryPhase(searchContext, tookInNanos);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("onQueryPhase listener [{}] failed", listener), e);
+logger.warn(() -> new ParameterizedMessage("onQueryPhase listener [{}] failed", listener), e);
}
}
}

@@ -166,7 +165,7 @@ public interface SearchOperationListener {
try {
listener.onPreFetchPhase(searchContext);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("onPreFetchPhase listener [{}] failed", listener), e);
+logger.warn(() -> new ParameterizedMessage("onPreFetchPhase listener [{}] failed", listener), e);
}
}
}

@@ -177,7 +176,7 @@ public interface SearchOperationListener {
try {
listener.onFailedFetchPhase(searchContext);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("onFailedFetchPhase listener [{}] failed", listener), e);
+logger.warn(() -> new ParameterizedMessage("onFailedFetchPhase listener [{}] failed", listener), e);
}
}
}

@@ -188,7 +187,7 @@ public interface SearchOperationListener {
try {
listener.onFetchPhase(searchContext, tookInNanos);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("onFetchPhase listener [{}] failed", listener), e);
+logger.warn(() -> new ParameterizedMessage("onFetchPhase listener [{}] failed", listener), e);
}
}
}

@@ -199,7 +198,7 @@ public interface SearchOperationListener {
try {
listener.onNewContext(context);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("onNewContext listener [{}] failed", listener), e);
+logger.warn(() -> new ParameterizedMessage("onNewContext listener [{}] failed", listener), e);
}
}
}

@@ -210,7 +209,7 @@ public interface SearchOperationListener {
try {
listener.onFreeContext(context);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("onFreeContext listener [{}] failed", listener), e);
+logger.warn(() -> new ParameterizedMessage("onFreeContext listener [{}] failed", listener), e);
}
}
}

@@ -221,7 +220,7 @@ public interface SearchOperationListener {
try {
listener.onNewScrollContext(context);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("onNewScrollContext listener [{}] failed", listener), e);
+logger.warn(() -> new ParameterizedMessage("onNewScrollContext listener [{}] failed", listener), e);
}
}
}

@@ -232,7 +231,7 @@ public interface SearchOperationListener {
try {
listener.onFreeScrollContext(context);
} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("onFreeScrollContext listener [{}] failed", listener), e);
+logger.warn(() -> new ParameterizedMessage("onFreeScrollContext listener [{}] failed", listener), e);
}
}
}

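A rewrite like this leans on ParameterizedMessage behaving the same across Log4j upgrades, so a small regression test that pins down the formatting contract is a natural companion to it. A minimal sketch of such a guard (assumed JUnit 4 names; not a test taken from this commit):

import static org.junit.Assert.assertEquals;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.junit.Test;

public class ParameterizedMessageSanityTest {
    // If a Log4j upgrade changed how {} placeholders are filled, every
    // logger.warn(() -> new ParameterizedMessage(...)) call site above would
    // silently produce wrong output; this assertion fails fast instead.
    @Test
    public void formatsPlaceholdersPositionally() {
        ParameterizedMessage message =
            new ParameterizedMessage("unexpected failure during [{}]", "test-source");
        assertEquals("unexpected failure during [test-source]", message.getFormattedMessage());
    }
}
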
@@ -21,7 +21,6 @@ package org.elasticsearch.index.store;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.index.CheckIndex;
 import org.apache.lucene.index.CorruptIndexException;

@@ -329,7 +328,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
                 directory.deleteFile(origFile);
             } catch (FileNotFoundException | NoSuchFileException e) {
             } catch (Exception ex) {
-                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete file [{}]", origFile), ex);
+                logger.debug(() -> new ParameterizedMessage("failed to delete file [{}]", origFile), ex);
             }
             // now, rename the files... and fail it it won't work
             directory.rename(tempFile, origFile);

@@ -462,7 +461,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
         } catch (FileNotFoundException | NoSuchFileException ex) {
             logger.info("Failed to open / find files while reading metadata snapshot");
         } catch (ShardLockObtainFailedException ex) {
-            logger.info((Supplier<?>) () -> new ParameterizedMessage("{}: failed to obtain shard lock", shardId), ex);
+            logger.info(() -> new ParameterizedMessage("{}: failed to obtain shard lock", shardId), ex);
         }
         return MetadataSnapshot.EMPTY;
     }

@@ -476,7 +475,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
         try {
             tryOpenIndex(indexLocation, shardId, shardLocker, logger);
         } catch (Exception ex) {
-            logger.trace((Supplier<?>) () -> new ParameterizedMessage("Can't open index for path [{}]", indexLocation), ex);
+            logger.trace(() -> new ParameterizedMessage("Can't open index for path [{}]", indexLocation), ex);
             return false;
         }
         return true;

@@ -676,7 +675,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
                     // if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit point around?
                     throw new IllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex);
                 }
-                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete file [{}]", existingFile), ex);
+                logger.debug(() -> new ParameterizedMessage("failed to delete file [{}]", existingFile), ex);
                 // ignore, we don't really care, will get deleted later on
             }
         }

@@ -886,7 +885,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
                 // Lucene checks the checksum after it tries to lookup the codec etc.
                 // in that case we might get only IAE or similar exceptions while we are really corrupt...
                 // TODO we should check the checksum in lucene if we hit an exception
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to build store metadata. checking segment info integrity (with commit [{}])", commit == null ? "no" : "yes"), ex);
+                logger.warn(() -> new ParameterizedMessage("failed to build store metadata. checking segment info integrity (with commit [{}])", commit == null ? "no" : "yes"), ex);
                 Lucene.checkSegmentInfoIntegrity(directory);
             } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException cex) {
                 cex.addSuppressed(ex);

@@ -921,7 +920,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
                 }

             } catch (Exception ex) {
-                logger.debug((Supplier<?>) () -> new ParameterizedMessage("Can retrieve checksum from file [{}]", file), ex);
+                logger.debug(() -> new ParameterizedMessage("Can retrieve checksum from file [{}]", file), ex);
                 throw ex;
             }
             builder.put(file, new StoreFileMetaData(file, length, checksum, version, fileHash.get()));
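All of these call sites build their messages through ParameterizedMessage, which substitutes {} placeholders in argument order. A small self-contained illustration of just the message class (values are hypothetical, not from the diff):

    import org.apache.logging.log4j.message.ParameterizedMessage;

    public class ParameterizedMessageDemo {
        public static void main(String[] args) {
            // Formatting happens only when the formatted message is requested.
            ParameterizedMessage message = new ParameterizedMessage("failed to delete file [{}]", "recovery.tmp");
            System.out.println(message.getFormattedMessage()); // prints: failed to delete file [recovery.tmp]
        }
    }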
@@ -20,7 +20,6 @@
 package org.elasticsearch.index.translog;

 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.elasticsearch.core.internal.io.IOUtils;

@@ -262,7 +261,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
                 try {
                     Files.delete(tempFile);
                 } catch (IOException ex) {
-                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to delete temp file {}", tempFile), ex);
+                    logger.warn(() -> new ParameterizedMessage("failed to delete temp file {}", tempFile), ex);
                 }
             }
         }
@@ -20,7 +20,6 @@
 package org.elasticsearch.indices;

 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.settings.Setting;

@@ -179,7 +178,7 @@ public class IndexingMemoryController extends AbstractComponent implements Index

             @Override
             public void onFailure(Exception e) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to write indexing buffer for shard [{}]; ignoring", shard.shardId()), e);
+                logger.warn(() -> new ParameterizedMessage("failed to write indexing buffer for shard [{}]; ignoring", shard.shardId()), e);
             }
         });
     }

@@ -384,7 +383,7 @@ public class IndexingMemoryController extends AbstractComponent implements Index
                 try {
                     shard.checkIdle(inactiveTimeNS);
                 } catch (AlreadyClosedException e) {
-                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("ignore exception while checking if shard {} is inactive", shard.shardId()), e);
+                    logger.trace(() -> new ParameterizedMessage("ignore exception while checking if shard {} is inactive", shard.shardId()), e);
                 }
             }
         }
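The point of passing a lambda instead of a pre-built message is laziness: when the level is disabled, the supplier never runs and the message is never allocated. A sketch of that behavior (logger name and placeholder value are made up; with no log4j2 configuration on the classpath the root level defaults to ERROR, so the DEBUG supplier below is never invoked):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    public class LazyMessageDemo {
        private static final Logger logger = LogManager.getLogger(LazyMessageDemo.class);

        public static void main(String[] args) {
            Supplier<ParameterizedMessage> costly = () -> {
                System.out.println("building message"); // runs only if DEBUG is enabled
                return new ParameterizedMessage("failed to write indexing buffer for shard [{}]; ignoring", 0);
            };
            logger.debug(costly); // returns without invoking the supplier at the default ERROR level
        }
    }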
@@ -19,7 +19,6 @@
 package org.elasticsearch.indices.analysis;

 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.analysis.hunspell.Dictionary;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.SimpleFSDirectory;

@@ -140,8 +139,7 @@ public class HunspellService extends AbstractComponent {
                 } catch (Exception e) {
                     // The cache loader throws unchecked exception (see #loadDictionary()),
                     // here we simply report the exception and continue loading the dictionaries
-                    logger.error(
-                        (Supplier<?>) () -> new ParameterizedMessage(
+                    logger.error(() -> new ParameterizedMessage(
                             "exception while loading dictionary {}", file.getFileName()), e);
                 }
             }

@@ -200,7 +198,7 @@ public class HunspellService extends AbstractComponent {
             }

         } catch (Exception e) {
-            logger.error((Supplier<?>) () -> new ParameterizedMessage("Could not load hunspell dictionary [{}]", locale), e);
+            logger.error(() -> new ParameterizedMessage("Could not load hunspell dictionary [{}]", locale), e);
             throw e;
         } finally {
             IOUtils.close(affixStream);
@@ -21,7 +21,6 @@ package org.elasticsearch.indices.cluster;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.store.LockObtainFailedException;
 import org.elasticsearch.ResourceAlreadyExistsException;
 import org.elasticsearch.Version;

@@ -307,8 +306,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
         threadPool.generic().execute(new AbstractRunnable() {
             @Override
             public void onFailure(Exception e) {
-                logger.warn(
-                    (Supplier<?>) () -> new ParameterizedMessage("[{}] failed to complete pending deletion for index", index), e);
+                logger.warn(() -> new ParameterizedMessage("[{}] failed to complete pending deletion for index", index), e);
             }

             @Override

@@ -670,8 +668,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
             // the node got closed on us, ignore it
         } catch (Exception inner) {
             inner.addSuppressed(failure);
-            logger.warn(
-                (Supplier<?>) () -> new ParameterizedMessage(
+            logger.warn(() -> new ParameterizedMessage(
                     "[{}][{}] failed to remove shard after failure ([{}])",
                     shardRouting.getIndexName(),
                     shardRouting.getId(),

@@ -685,15 +682,13 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple

     private void sendFailShard(ShardRouting shardRouting, String message, @Nullable Exception failure, ClusterState state) {
         try {
-            logger.warn(
-                (Supplier<?>) () -> new ParameterizedMessage(
+            logger.warn(() -> new ParameterizedMessage(
                     "[{}] marking and sending shard failed due to [{}]", shardRouting.shardId(), message), failure);
             failedShardsCache.put(shardRouting.shardId(), shardRouting);
             shardStateAction.localShardFailed(shardRouting, message, failure, SHARD_STATE_ACTION_LISTENER, state);
         } catch (Exception inner) {
             if (failure != null) inner.addSuppressed(failure);
-            logger.warn(
-                (Supplier<?>) () -> new ParameterizedMessage(
+            logger.warn(() -> new ParameterizedMessage(
                     "[{}][{}] failed to mark shard as failed (because of [{}])",
                     shardRouting.getIndexName(),
                     shardRouting.getId(),
@@ -19,7 +19,6 @@
 package org.elasticsearch.indices.flush;

 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;

@@ -107,7 +106,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL

                 @Override
                 public void onFailure(Exception e) {
-                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} sync flush on inactive shard failed", indexShard.shardId()), e);
+                    logger.debug(() -> new ParameterizedMessage("{} sync flush on inactive shard failed", indexShard.shardId()), e);
                 }
             });
         }

@@ -397,7 +396,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL

                 @Override
                 public void handleException(TransportException exp) {
-                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} error while performing synced flush on [{}], skipping", shardId, shard), exp);
+                    logger.trace(() -> new ParameterizedMessage("{} error while performing synced flush on [{}], skipping", shardId, shard), exp);
                     results.put(shard, new ShardSyncedFlushResponse(exp.getMessage()));
                     countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
                 }

@@ -453,7 +452,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL

                 @Override
                 public void handleException(TransportException exp) {
-                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} error while performing pre synced flush on [{}], skipping", shardId, shard), exp);
+                    logger.trace(() -> new ParameterizedMessage("{} error while performing pre synced flush on [{}], skipping", shardId, shard), exp);
                     if (countDown.countDown()) {
                         listener.onResponse(presyncResponses);
                     }
@@ -21,7 +21,6 @@ package org.elasticsearch.indices.recovery;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexCommit;
 import org.apache.lucene.store.AlreadyClosedException;

@@ -144,8 +143,7 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde
     }

     protected void retryRecovery(final long recoveryId, final Throwable reason, TimeValue retryAfter, TimeValue activityTimeout) {
-        logger.trace(
-            (Supplier<?>) () -> new ParameterizedMessage(
+        logger.trace(() -> new ParameterizedMessage(
                 "will retry recovery with id [{}] in [{}]", recoveryId, retryAfter), reason);
         retryRecovery(recoveryId, retryAfter, activityTimeout);
     }

@@ -229,12 +227,8 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde
             logger.trace("recovery cancelled", e);
         } catch (Exception e) {
             if (logger.isTraceEnabled()) {
-                logger.trace(
-                    (Supplier<?>) () -> new ParameterizedMessage(
-                        "[{}][{}] Got exception on recovery",
-                        request.shardId().getIndex().getName(),
-                        request.shardId().id()),
-                    e);
+                logger.trace(() -> new ParameterizedMessage(
+                    "[{}][{}] Got exception on recovery", request.shardId().getIndex().getName(), request.shardId().id()), e);
             }
             Throwable cause = ExceptionsHelper.unwrapCause(e);
             if (cause instanceof CancellableThreads.ExecutionCancelledException) {

@@ -532,12 +526,9 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde
             long currentVersion = future.get();
             logger.trace("successfully waited for cluster state with version {} (current: {})", clusterStateVersion, currentVersion);
         } catch (Exception e) {
-            logger.debug(
-                (Supplier<?>) () -> new ParameterizedMessage(
+            logger.debug(() -> new ParameterizedMessage(
                     "failed waiting for cluster state with version {} (current: {})",
-                    clusterStateVersion,
-                    clusterService.state().getVersion()),
-                e);
+                    clusterStateVersion, clusterService.state().getVersion()), e);
             throw ExceptionsHelper.convertToRuntime(e);
         }
     }

@@ -615,16 +606,13 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde
         public void onFailure(Exception e) {
             try (RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) {
                 if (recoveryRef != null) {
-                    logger.error(
-                        (Supplier<?>) () -> new ParameterizedMessage(
-                            "unexpected error during recovery [{}], failing shard", recoveryId), e);
+                    logger.error(() -> new ParameterizedMessage("unexpected error during recovery [{}], failing shard", recoveryId), e);
                     onGoingRecoveries.failRecovery(recoveryId,
                         new RecoveryFailedException(recoveryRef.target().state(), "unexpected error", e),
                         true // be safe
                     );
                 } else {
-                    logger.debug(
-                        (Supplier<?>) () -> new ParameterizedMessage(
+                    logger.debug(() -> new ParameterizedMessage(
                         "unexpected error during recovery, but recovery id [{}] is finished", recoveryId), e);
                 }
             }
@@ -21,7 +21,6 @@ package org.elasticsearch.indices.recovery;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchTimeoutException;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.unit.TimeValue;

@@ -269,7 +268,7 @@ public class RecoveriesCollection {

         @Override
         public void onFailure(Exception e) {
-            logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected error while monitoring recovery [{}]", recoveryId), e);
+            logger.error(() -> new ParameterizedMessage("unexpected error while monitoring recovery [{}]", recoveryId), e);
         }

         @Override
@@ -407,12 +407,9 @@ public class RecoverySourceHandler {
                     RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " +
                         "checksums are ok", null);
                     exception.addSuppressed(targetException);
-                    logger.warn(
-                        (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
+                    logger.warn(() -> new ParameterizedMessage(
                         "{} Remote file corruption during finalization of recovery on node {}. local checksum OK",
-                        shard.shardId(),
-                        request.targetNode()),
-                        corruptIndexException);
+                        shard.shardId(), request.targetNode()), corruptIndexException);
                     throw exception;
                 } else {
                     throw targetException;

@@ -681,13 +678,9 @@ public class RecoverySourceHandler {
                 RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " +
                     "checksums are ok", null);
                 exception.addSuppressed(e);
-                logger.warn(
-                    (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
+                logger.warn(() -> new ParameterizedMessage(
                     "{} Remote file corruption on node {}, recovering {}. local checksum OK",
-                    shardId,
-                    request.targetNode(),
-                    md),
-                    corruptIndexException);
+                    shardId, request.targetNode(), md), corruptIndexException);
                 throw exception;
             }
         } else {
@@ -21,7 +21,6 @@ package org.elasticsearch.indices.recovery;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexFormatTooNewException;
 import org.apache.lucene.index.IndexFormatTooOldException;

@@ -331,8 +330,7 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
             try {
                 entry.getValue().close();
             } catch (Exception e) {
-                logger.debug(
-                    (Supplier<?>) () -> new ParameterizedMessage("error while closing recovery output [{}]", entry.getValue()), e);
+                logger.debug(() -> new ParameterizedMessage("error while closing recovery output [{}]", entry.getValue()), e);
             }
             iterator.remove();
         }
@@ -20,7 +20,6 @@
 package org.elasticsearch.indices.store;

 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;

@@ -256,7 +255,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe

         @Override
         public void handleException(TransportException exp) {
-            logger.debug((Supplier<?>) () -> new ParameterizedMessage("shards active request failed for {}", shardId), exp);
+            logger.debug(() -> new ParameterizedMessage("shards active request failed for {}", shardId), exp);
             if (awaitingResponses.decrementAndGet() == 0) {
                 allNodesResponded();
             }

@@ -288,10 +287,10 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
                 try {
                     indicesService.deleteShardStore("no longer used", shardId, currentState);
                 } catch (Exception ex) {
-                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to delete unallocated shard, ignoring", shardId), ex);
+                    logger.debug(() -> new ParameterizedMessage("{} failed to delete unallocated shard, ignoring", shardId), ex);
                 }
             },
-            (source, e) -> logger.error((Supplier<?>) () -> new ParameterizedMessage("{} unexpected error during deletion of unallocated shard", shardId), e)
+            (source, e) -> logger.error(() -> new ParameterizedMessage("{} unexpected error during deletion of unallocated shard", shardId), e)
         );
     }


@@ -340,9 +339,9 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
                 try {
                     channel.sendResponse(new ShardActiveResponse(shardActive, clusterService.localNode()));
                 } catch (IOException e) {
-                    logger.error((Supplier<?>) () -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e);
+                    logger.error(() -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e);
                 } catch (EsRejectedExecutionException e) {
-                    logger.error((Supplier<?>) () -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e);
+                    logger.error(() -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e);
                 }
             }
         }, newState -> {
@@ -20,7 +20,6 @@
 package org.elasticsearch.monitor.fs;

 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.Constants;
 import org.elasticsearch.cluster.ClusterInfo;
 import org.elasticsearch.cluster.DiskUsage;

@@ -123,8 +122,7 @@ public class FsProbe extends AbstractComponent {
         } catch (Exception e) {
             // do not fail Elasticsearch if something unexpected
             // happens here
-            logger.debug(
-                (Supplier<?>) () -> new ParameterizedMessage(
+            logger.debug(() -> new ParameterizedMessage(
                     "unexpected exception processing /proc/diskstats for devices {}", devicesNumbers), e);
             return null;
         }
@@ -20,7 +20,6 @@ package org.elasticsearch.persistent;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
 import org.elasticsearch.common.Nullable;

@@ -148,8 +147,7 @@ public class AllocatedPersistentTask extends CancellableTask {
             logger.warn("attempt to complete task [{}] with id [{}] in the [{}] state", getAction(), getPersistentTaskId(), prevState);
         } else {
             if (failure != null) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage(
-                    "task {} failed with an exception", getPersistentTaskId()), failure);
+                logger.warn(() -> new ParameterizedMessage("task {} failed with an exception", getPersistentTaskId()), failure);
             }
             try {
                 this.failure = failure;

@@ -165,9 +163,8 @@ public class AllocatedPersistentTask extends CancellableTask {

             @Override
             public void onFailure(Exception e) {
-                logger.warn((Supplier<?>) () ->
-                    new ParameterizedMessage("notification for task [{}] with id [{}] failed",
-                        getAction(), getPersistentTaskId()), e);
+                logger.warn(() -> new ParameterizedMessage(
+                    "notification for task [{}] with id [{}] failed", getAction(), getPersistentTaskId()), e);
             }
         });
     }
@@ -19,7 +19,6 @@
 package org.elasticsearch.persistent;

 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
 import org.elasticsearch.cluster.ClusterChangedEvent;

@@ -207,9 +206,9 @@ public class PersistentTasksNodeService extends AbstractComponent implements Clu
             @Override
             public void onFailure(Exception e) {
                 // There is really nothing we can do in case of failure here
-                logger.warn((Supplier<?>) () ->
-                    new ParameterizedMessage("failed to cancel task [{}] with id [{}] and allocation id [{}]", task.getAction(),
-                        task.getPersistentTaskId(), task.getAllocationId()), e);
+                logger.warn(() -> new ParameterizedMessage(
+                    "failed to cancel task [{}] with id [{}] and allocation id [{}]",
+                    task.getAction(), task.getPersistentTaskId(), task.getAllocationId()), e);
             }
         });
     }
@@ -20,7 +20,6 @@
 package org.elasticsearch.repositories;

 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
 import org.elasticsearch.cluster.ClusterChangedEvent;

@@ -142,7 +141,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta

                 @Override
                 public void onFailure(String source, Exception e) {
-                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create repository [{}]", request.name), e);
+                    logger.warn(() -> new ParameterizedMessage("failed to create repository [{}]", request.name), e);
                     super.onFailure(source, e);
                 }


@@ -217,7 +216,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
                 try {
                     repository.endVerification(verificationToken);
                 } catch (Exception e) {
-                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), e);
+                    logger.warn(() -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), e);
                     listener.onFailure(e);
                     return;
                 }

@@ -234,7 +233,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
                     repository.endVerification(verificationToken);
                 } catch (Exception inner) {
                     inner.addSuppressed(e);
-                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), inner);
+                    logger.warn(() -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), inner);
                 }
                 listener.onFailure(e);
             }

@@ -296,14 +295,14 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
                 } catch (RepositoryException ex) {
                     // TODO: this catch is bogus, it means the old repo is already closed,
                     // but we have nothing to replace it
-                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to change repository [{}]", repositoryMetaData.name()), ex);
+                    logger.warn(() -> new ParameterizedMessage("failed to change repository [{}]", repositoryMetaData.name()), ex);
                 }
             }
         } else {
             try {
                 repository = createRepository(repositoryMetaData);
             } catch (RepositoryException ex) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create repository [{}]", repositoryMetaData.name()), ex);
+                logger.warn(() -> new ParameterizedMessage("failed to create repository [{}]", repositoryMetaData.name()), ex);
             }
         }
         if (repository != null) {

@@ -385,7 +384,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
             repository.start();
             return repository;
         } catch (Exception e) {
-            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()), e);
+            logger.warn(() -> new ParameterizedMessage("failed to create repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()), e);
             throw new RepositoryException(repositoryMetaData.name(), "failed to create repository", e);
         }
     }
@@ -22,7 +22,6 @@ package org.elasticsearch.repositories;
 import com.carrotsearch.hppc.ObjectContainer;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;

@@ -81,7 +80,7 @@ public class VerifyNodeRepositoryAction extends AbstractComponent {
                 try {
                     doVerify(repository, verificationToken, localNode);
                 } catch (Exception e) {
-                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to verify repository", repository), e);
+                    logger.warn(() -> new ParameterizedMessage("[{}] failed to verify repository", repository), e);
                     errors.add(new VerificationFailure(node.getId(), e));
                 }
                 if (counter.decrementAndGet() == 0) {

@@ -152,7 +151,7 @@ public class VerifyNodeRepositoryAction extends AbstractComponent {
             try {
                 doVerify(request.repository, request.verificationToken, localNode);
             } catch (Exception ex) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to verify repository", request.repository), ex);
+                logger.warn(() -> new ParameterizedMessage("[{}] failed to verify repository", request.repository), ex);
                 throw ex;
             }
             channel.sendResponse(TransportResponse.Empty.INSTANCE);
@@ -20,7 +20,6 @@
 package org.elasticsearch.repositories.blobstore;

 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexCommit;
 import org.apache.lucene.index.IndexFormatTooNewException;

@@ -351,7 +350,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
         } catch (SnapshotMissingException ex) {
             throw ex;
         } catch (IllegalStateException | SnapshotException | ElasticsearchParseException ex) {
-            logger.warn((Supplier<?>) () -> new ParameterizedMessage("cannot read snapshot file [{}]", snapshotId), ex);
+            logger.warn(() -> new ParameterizedMessage("cannot read snapshot file [{}]", snapshotId), ex);
         }
         MetaData metaData = null;
         try {

@@ -361,7 +360,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                 metaData = readSnapshotMetaData(snapshotId, null, repositoryData.resolveIndices(indices), true);
             }
         } catch (IOException | SnapshotException ex) {
-            logger.warn((Supplier<?>) () -> new ParameterizedMessage("cannot read metadata for snapshot [{}]", snapshotId), ex);
+            logger.warn(() -> new ParameterizedMessage("cannot read metadata for snapshot [{}]", snapshotId), ex);
         }
         try {
             // Delete snapshot from the index file, since it is the maintainer of truth of active snapshots

@@ -381,7 +380,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                 try {
                     indexMetaDataFormat.delete(indexMetaDataBlobContainer, snapshotId.getUUID());
                 } catch (IOException ex) {
-                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to delete metadata for index [{}]", snapshotId, index), ex);
+                    logger.warn(() -> new ParameterizedMessage("[{}] failed to delete metadata for index [{}]", snapshotId, index), ex);
                 }
                 if (metaData != null) {
                     IndexMetaData indexMetaData = metaData.index(index);

@@ -391,7 +390,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                             delete(snapshotId, snapshot.version(), indexId, new ShardId(indexMetaData.getIndex(), shardId));
                         } catch (SnapshotException ex) {
                             final int finalShardId = shardId;
-                            logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]", snapshotId, index, finalShardId), ex);
+                            logger.warn(() -> new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]", snapshotId, index, finalShardId), ex);
                         }
                     }
                 }

@@ -410,11 +409,11 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                 // we'll ignore that and accept that cleanup didn't fully succeed.
                 // since we are using UUIDs for path names, this won't be an issue for
                 // snapshotting indices of the same name
-                logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " +
+                logger.debug(() -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " +
                     "its index folder due to the directory not being empty.", metadata.name(), indexId), dnee);
             } catch (IOException ioe) {
                 // a different IOException occurred while trying to delete - will just log the issue for now
-                logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " +
+                logger.debug(() -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " +
                     "its index folder.", metadata.name(), indexId), ioe);
             }
         }

@@ -428,10 +427,10 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                 snapshotFormat.delete(snapshotsBlobContainer, blobId);
             } catch (IOException e) {
                 if (snapshotInfo != null) {
-                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] Unable to delete snapshot file [{}]",
+                    logger.warn(() -> new ParameterizedMessage("[{}] Unable to delete snapshot file [{}]",
                         snapshotInfo.snapshotId(), blobId), e);
                 } else {
-                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("Unable to delete snapshot file [{}]", blobId), e);
+                    logger.warn(() -> new ParameterizedMessage("Unable to delete snapshot file [{}]", blobId), e);
                 }
             }
         }

@@ -441,10 +440,10 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                 globalMetaDataFormat.delete(snapshotsBlobContainer, blobId);
             } catch (IOException e) {
                 if (snapshotInfo != null) {
-                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] Unable to delete global metadata file [{}]",
+                    logger.warn(() -> new ParameterizedMessage("[{}] Unable to delete global metadata file [{}]",
                         snapshotInfo.snapshotId(), blobId), e);
                 } else {
-                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("Unable to delete global metadata file [{}]", blobId), e);
+                    logger.warn(() -> new ParameterizedMessage("Unable to delete global metadata file [{}]", blobId), e);
                 }
             }
         }

@@ -522,7 +521,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                 metaDataBuilder.put(indexMetaDataFormat.read(indexMetaDataBlobContainer, snapshotId.getUUID()), false);
             } catch (ElasticsearchParseException | IOException ex) {
                 if (ignoreIndexErrors) {
-                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] [{}] failed to read metadata for index", snapshotId, index.getName()), ex);
+                    logger.warn(() -> new ParameterizedMessage("[{}] [{}] failed to read metadata for index", snapshotId, index.getName()), ex);
                 } else {
                     throw ex;
                 }

@@ -983,7 +982,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                     blobContainer.deleteBlob(blobName);
                 } catch (IOException e) {
                     // TODO: don't catch and let the user handle it?
-                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] [{}] error deleting blob [{}] during cleanup", snapshotId, shardId, blobName), e);
+                    logger.debug(() -> new ParameterizedMessage("[{}] [{}] error deleting blob [{}] during cleanup", snapshotId, shardId, blobName), e);
                 }
             }
         }

@@ -1062,7 +1061,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                 return new Tuple<>(shardSnapshots, latest);
             } catch (IOException e) {
                 final String file = SNAPSHOT_INDEX_PREFIX + latest;
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to read index file [{}]", file), e);
+                logger.warn(() -> new ParameterizedMessage("failed to read index file [{}]", file), e);
             }
         } else if (blobKeys.isEmpty() == false) {
             logger.debug("Could not find a readable index-N file in a non-empty shard snapshot directory [{}]", blobContainer.path());

@@ -1080,7 +1079,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                     snapshots.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles()));
                 }
             } catch (IOException e) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to read commit point [{}]", name), e);
+                logger.warn(() -> new ParameterizedMessage("failed to read commit point [{}]", name), e);
             }
         }
         return new Tuple<>(new BlobStoreIndexShardSnapshots(snapshots), -1);

@@ -1166,7 +1165,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                     // in a bwc compatible way.
                     maybeRecalculateMetadataHash(blobContainer, fileInfo, metadata);
                 } catch (Exception e) {
-                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e);
+                    logger.warn(() -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e);
                 }
                 if (fileInfo.isSame(md) && snapshotFileExistsInBlobs(fileInfo, blobs)) {
                     // a commit point file with the same name, size and checksum was already copied to repository

@@ -1441,7 +1440,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                 logger.trace("[{}] [{}] restoring from to an empty shard", shardId, snapshotId);
                 recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY;
             } catch (IOException e) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} Can't read metadata from store, will not reuse any local file while restoring", shardId), e);
+                logger.warn(() -> new ParameterizedMessage("{} Can't read metadata from store, will not reuse any local file while restoring", shardId), e);
                 recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY;
             }


@@ -1457,7 +1456,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                     maybeRecalculateMetadataHash(blobContainer, fileInfo, recoveryTargetMetadata);
                 } catch (Exception e) {
                     // if the index is broken we might not be able to read it
-                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} Can't calculate hash from blog for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e);
+                    logger.warn(() -> new ParameterizedMessage("{} Can't calculate hash from blog for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e);
                 }
                 snapshotMetaData.put(fileInfo.metadata().name(), fileInfo.metadata());
                 fileInfos.put(fileInfo.metadata().name(), fileInfo);
@@ -24,7 +24,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.IndicesOptions;

@@ -455,7 +454,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateApp

                 @Override
                 public void onFailure(String source, Exception e) {
-                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to restore snapshot", snapshotId), e);
+                    logger.warn(() -> new ParameterizedMessage("[{}] failed to restore snapshot", snapshotId), e);
                     listener.onFailure(e);
                 }


@@ -472,7 +471,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateApp


         } catch (Exception e) {
-            logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to restore snapshot", request.repositoryName + ":" + request.snapshotName), e);
+            logger.warn(() -> new ParameterizedMessage("[{}] failed to restore snapshot", request.repositoryName + ":" + request.snapshotName), e);
             listener.onFailure(e);
         }
     }

@@ -679,7 +678,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateApp

             @Override
             public void onFailure(final String source, final Exception e) {
-                logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
+                logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
             }

             @Override
@@ -21,7 +21,6 @@ package org.elasticsearch.snapshots;

 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;

@@ -340,8 +339,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements

                     @Override
                     public void onFailure(Exception e) {
-                        logger.warn((Supplier<?>) () ->
-                            new ParameterizedMessage("[{}][{}] failed to snapshot shard", shardId, snapshot), e);
+                        logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard", shardId, snapshot), e);
                         failure.set(e);
                     }

@@ -531,7 +529,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
             UpdateIndexShardSnapshotStatusRequest request = new UpdateIndexShardSnapshotStatusRequest(snapshot, shardId, status);
             transportService.sendRequest(transportService.getLocalNode(), UPDATE_SNAPSHOT_STATUS_ACTION_NAME, request, INSTANCE_SAME);
         } catch (Exception e) {
-            logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e);
+            logger.warn(() -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e);
         }
     }

@@ -22,7 +22,6 @@ package org.elasticsearch.snapshots;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;

@@ -192,7 +191,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
                 }
             } catch (Exception ex) {
                 if (ignoreUnavailable) {
-                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to get snapshot [{}]", snapshotId), ex);
+                    logger.warn(() -> new ParameterizedMessage("failed to get snapshot [{}]", snapshotId), ex);
                 } else {
                     throw new SnapshotException(repositoryName, snapshotId, "Snapshot could not be read", ex);
                 }

@@ -270,7 +269,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus

             @Override
             public void onFailure(String source, Exception e) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e);
+                logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e);
                 newSnapshot = null;
                 listener.onFailure(e);
             }

@@ -432,7 +431,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus

             @Override
             public void onFailure(String source, Exception e) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()), e);
+                logger.warn(() -> new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()), e);
                 removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, true, userCreateSnapshotListener, e));
             }

@@ -463,7 +462,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
                 }
             });
         } catch (Exception e) {
-            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e);
+            logger.warn(() -> new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e);
             removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, snapshotCreated, userCreateSnapshotListener, e));
         }
     }

@@ -511,7 +510,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
                     snapshot.includeGlobalState());
             } catch (Exception inner) {
                 inner.addSuppressed(exception);
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to close snapshot in repository", snapshot.snapshot()), inner);
+                logger.warn(() -> new ParameterizedMessage("[{}] failed to close snapshot in repository", snapshot.snapshot()), inner);
             }
         }
         userCreateSnapshotListener.onFailure(e);

@@ -824,7 +823,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus

             @Override
             public void onFailure(String source, Exception e) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e);
+                logger.warn(() -> new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e);
             }
         });
     }

@@ -983,7 +982,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
                 removeSnapshotFromClusterState(snapshot, snapshotInfo, null);
                 logger.info("snapshot [{}] completed with state [{}]", snapshot, snapshotInfo.state());
             } catch (Exception e) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to finalize snapshot", snapshot), e);
+                logger.warn(() -> new ParameterizedMessage("[{}] failed to finalize snapshot", snapshot), e);
                 removeSnapshotFromClusterState(snapshot, null, e);
             }
         });

@@ -1032,7 +1031,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus

             @Override
             public void onFailure(String source, Exception e) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to remove snapshot metadata", snapshot), e);
+                logger.warn(() -> new ParameterizedMessage("[{}] failed to remove snapshot metadata", snapshot), e);
                 if (listener != null) {
                     listener.onFailure(e);
                 }

@@ -1055,7 +1054,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
                     listener.onSnapshotFailure(snapshot, failure);
                 }
             } catch (Exception t) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to notify listener [{}]", listener), t);
+                logger.warn(() -> new ParameterizedMessage("failed to notify listener [{}]", listener), t);
             }
         }
         if (listener != null) {

@@ -1224,8 +1223,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
                     listener, true);

                 } catch (Exception ex) {
-                    logger.warn((Supplier<?>) () ->
-                        new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex);
+                    logger.warn(() -> new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex);
                 }
             }
         );

@@ -1244,7 +1242,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
                     listener,
                     true);
             } catch (SnapshotMissingException smex) {
-                logger.info((Supplier<?>) () -> new ParameterizedMessage(
+                logger.info(() -> new ParameterizedMessage(
                     "Tried deleting in-progress snapshot [{}], but it " +
                     "could not be found after failing to abort.",
                     smex.getSnapshotName()), e);

@@ -1339,7 +1337,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus

             @Override
             public void onFailure(String source, Exception e) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to remove snapshot deletion metadata", snapshot), e);
+                logger.warn(() -> new ParameterizedMessage("[{}] failed to remove snapshot deletion metadata", snapshot), e);
                 if (listener != null) {
                     listener.onFailure(e);
                 }

@@ -21,7 +21,6 @@ package org.elasticsearch.tasks;

 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.common.logging.Loggers;

 /**

@@ -51,6 +50,6 @@ public final class LoggingTaskListener<Response> implements TaskListener<Respons

     @Override
     public void onFailure(Task task, Throwable e) {
-        logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed with exception", task.getId()), e);
+        logger.warn(() -> new ParameterizedMessage("{} failed with exception", task.getId()), e);
     }
 }

@@ -20,7 +20,6 @@
 package org.elasticsearch.tasks;

 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ElasticsearchTimeoutException;
 import org.elasticsearch.ExceptionsHelper;

@@ -197,8 +196,7 @@ public class TaskManager extends AbstractComponent implements ClusterStateApplie
         try {
             taskResult = task.result(localNode, error);
         } catch (IOException ex) {
-            logger.warn(
-                (Supplier<?>) () -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), ex);
+            logger.warn(() -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), ex);
             listener.onFailure(ex);
             return;
         }

@@ -210,8 +208,7 @@ public class TaskManager extends AbstractComponent implements ClusterStateApplie

             @Override
             public void onFailure(Exception e) {
-                logger.warn(
-                    (Supplier<?>) () -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), e);
+                logger.warn(() -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), e);
                 listener.onFailure(e);
             }
         });

@@ -232,7 +229,7 @@ public class TaskManager extends AbstractComponent implements ClusterStateApplie
         try {
             taskResult = task.result(localNode, response);
         } catch (IOException ex) {
-            logger.warn((Supplier<?>) () -> new ParameterizedMessage("couldn't store response {}", response), ex);
+            logger.warn(() -> new ParameterizedMessage("couldn't store response {}", response), ex);
             listener.onFailure(ex);
             return;
         }

@@ -245,7 +242,7 @@ public class TaskManager extends AbstractComponent implements ClusterStateApplie

             @Override
             public void onFailure(Exception e) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("couldn't store response {}", response), e);
+                logger.warn(() -> new ParameterizedMessage("couldn't store response {}", response), e);
                 listener.onFailure(e);
             }
         });

@@ -185,8 +185,7 @@ public class TaskResultsService extends AbstractComponent {
             Streams.copy(is, out);
             return out.toString(StandardCharsets.UTF_8.name());
         } catch (Exception e) {
-            logger.error(
-                (Supplier<?>) () -> new ParameterizedMessage(
+            logger.error(() -> new ParameterizedMessage(
                     "failed to create tasks results index template [{}]", TASK_RESULT_INDEX_MAPPING_FILE), e);
             throw new IllegalStateException("failed to create tasks results index template [" + TASK_RESULT_INDEX_MAPPING_FILE + "]", e);
         }

@@ -20,7 +20,6 @@
 package org.elasticsearch.threadpool;

 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.Counter;
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.Version;

@@ -351,11 +350,11 @@ public class ThreadPool extends AbstractComponent implements Scheduler, Closeabl
         return new ReschedulingRunnable(command, interval, executor, this,
             (e) -> {
                 if (logger.isDebugEnabled()) {
-                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("scheduled task [{}] was rejected on thread pool [{}]",
+                    logger.debug(() -> new ParameterizedMessage("scheduled task [{}] was rejected on thread pool [{}]",
                         command, executor), e);
                 }
             },
-            (e) -> logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to run scheduled task [{}] on thread pool [{}]",
+            (e) -> logger.warn(() -> new ParameterizedMessage("failed to run scheduled task [{}] on thread pool [{}]",
                 command, executor), e));
     }

@@ -443,7 +442,7 @@ public class ThreadPool extends AbstractComponent implements Scheduler, Closeabl
             try {
                 runnable.run();
             } catch (Exception e) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to run {}", runnable.toString()), e);
+                logger.warn(() -> new ParameterizedMessage("failed to run {}", runnable.toString()), e);
                 throw e;
             }
         }

@@ -19,7 +19,6 @@
 package org.elasticsearch.transport;

 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.apache.lucene.util.SetOnce;

@@ -65,6 +64,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.function.Predicate;
+import java.util.function.Supplier;
 import java.util.stream.Collectors;

 /**

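Note the asymmetry in this file: the Log4j Supplier import is removed while a java.util.function.Supplier import is added for the class's own use. The two types are unrelated, which the following sketch illustrates (the field and method are hypothetical, assuming Log4j 2.x; they are not taken from RemoteClusterConnection):

import java.util.function.Supplier;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;

public class SupplierImportsSketch {
    private static final Logger logger = LogManager.getLogger(SupplierImportsSketch.class);

    // The JDK Supplier covers ordinary deferred values inside the class itself...
    private final Supplier<String> clusterAlias = () -> "my-remote-cluster";

    void onFetchFailure(Exception ex) {
        // ...while the logging call needs no Supplier import at all: the bare
        // lambda is typed directly against the logger's own supplier overload.
        logger.warn(() -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias.get()), ex);
    }
}
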
@@ -433,7 +433,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
                 handshakeNode = transportService.handshake(connection, remoteProfile.getHandshakeTimeout().millis(),
                     (c) -> remoteClusterName.get() == null ? true : c.equals(remoteClusterName.get()));
             } catch (IllegalStateException ex) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("seed node {} cluster name mismatch expected " +
+                logger.warn(() -> new ParameterizedMessage("seed node {} cluster name mismatch expected " +
                     "cluster name {}", connection.getNode(), remoteClusterName.get()), ex);
                 throw ex;
             }

@@ -475,8 +475,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
             } catch (ConnectTransportException | IOException | IllegalStateException ex) {
                 // ISE if we fail the handshake with an version incompatible node
                 if (seedNodes.hasNext()) {
-                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("fetching nodes from external cluster {} failed",
-                        clusterAlias), ex);
+                    logger.debug(() -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), ex);
                     collectRemoteNodes(seedNodes, transportService, listener);
                 } else {
                     listener.onFailure(ex);

@@ -551,8 +550,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
                     } catch (ConnectTransportException | IllegalStateException ex) {
                         // ISE if we fail the handshake with an version incompatible node
                         // fair enough we can't connect just move on
-                        logger.debug((Supplier<?>)
-                            () -> new ParameterizedMessage("failed to connect to node {}", node), ex);
+                        logger.debug(() -> new ParameterizedMessage("failed to connect to node {}", node), ex);
                     }
                 }
             }

@@ -562,9 +560,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
         } catch (CancellableThreads.ExecutionCancelledException ex) {
             listener.onFailure(ex); // we got canceled - fail the listener and step out
         } catch (Exception ex) {
-            logger.warn((Supplier<?>)
-                () -> new ParameterizedMessage("fetching nodes from external cluster {} failed",
-                    clusterAlias), ex);
+            logger.warn(() -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), ex);
             collectRemoteNodes(seedNodes, transportService, listener);
         }
     }

@@ -572,9 +568,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
         @Override
         public void handleException(TransportException exp) {
             assert transportService.getThreadPool().getThreadContext().isSystemContext() == false : "context is a system context";
-            logger.warn((Supplier<?>)
-                () -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias),
-                exp);
+            logger.warn(() -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), exp);
             try {
                 IOUtils.closeWhileHandlingException(connection);
             } finally {

@@ -21,7 +21,6 @@ package org.elasticsearch.transport;
 import com.carrotsearch.hppc.IntHashSet;
 import com.carrotsearch.hppc.IntSet;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;

@@ -354,11 +353,10 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
         @Override
         protected void innerOnFailure(Exception e) {
             if (channel.isOpen()) {
-                logger.debug(
-                    (Supplier<?>) () -> new ParameterizedMessage("[{}] failed to send ping transport message", node), e);
+                logger.debug(() -> new ParameterizedMessage("[{}] failed to send ping transport message", node), e);
                 failedPings.inc();
             } else {
-                logger.trace((Supplier<?>) () ->
+                logger.trace(() ->
                     new ParameterizedMessage("[{}] failed to send ping transport message (channel closed)", node), e);
             }

@@ -545,9 +543,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
             throw new ConnectTransportException(node, "general node connection failure", e);
         } finally {
             if (success == false) { // close the connection if there is a failure
-                logger.trace(
-                    (Supplier<?>) () -> new ParameterizedMessage(
-                        "failed to connect to [{}], cleaning dangling connections", node));
+                logger.trace(() -> new ParameterizedMessage("failed to connect to [{}], cleaning dangling connections", node));
                 IOUtils.closeWhileHandlingException(nodeChannels);
             }
         }

@@ -992,27 +988,21 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
         }

         if (isCloseConnectionException(e)) {
-            logger.trace(
-                (Supplier<?>) () -> new ParameterizedMessage(
-                    "close connection exception caught on transport layer [{}], disconnecting from relevant node",
-                    channel),
-                e);
+            logger.trace(() -> new ParameterizedMessage(
+                "close connection exception caught on transport layer [{}], disconnecting from relevant node", channel), e);
             // close the channel, which will cause a node to be disconnected if relevant
             TcpChannel.closeChannel(channel, false);
         } else if (isConnectException(e)) {
-            logger.trace((Supplier<?>) () -> new ParameterizedMessage("connect exception caught on transport layer [{}]", channel), e);
+            logger.trace(() -> new ParameterizedMessage("connect exception caught on transport layer [{}]", channel), e);
             // close the channel as safe measure, which will cause a node to be disconnected if relevant
             TcpChannel.closeChannel(channel, false);
         } else if (e instanceof BindException) {
-            logger.trace((Supplier<?>) () -> new ParameterizedMessage("bind exception caught on transport layer [{}]", channel), e);
+            logger.trace(() -> new ParameterizedMessage("bind exception caught on transport layer [{}]", channel), e);
             // close the channel as safe measure, which will cause a node to be disconnected if relevant
             TcpChannel.closeChannel(channel, false);
         } else if (e instanceof CancelledKeyException) {
-            logger.trace(
-                (Supplier<?>) () -> new ParameterizedMessage(
-                    "cancelled key exception caught on transport layer [{}], disconnecting from relevant node",
-                    channel),
-                e);
+            logger.trace(() -> new ParameterizedMessage(
+                "cancelled key exception caught on transport layer [{}], disconnecting from relevant node", channel), e);
             // close the channel as safe measure, which will cause a node to be disconnected if relevant
             TcpChannel.closeChannel(channel, false);
         } else if (e instanceof TcpTransport.HttpOnTransportException) {

@@ -1034,8 +1024,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
                 internalSendMessage(channel, message, closeChannel);
             }
         } else {
-            logger.warn(
-                (Supplier<?>) () -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e);
+            logger.warn(() -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e);
             // close the channel, which will cause a node to be disconnected if relevant
             TcpChannel.closeChannel(channel, false);
         }

@@ -1538,7 +1527,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
             try {
                 handler.handleException(rtx);
             } catch (Exception e) {
-                logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e);
+                logger.error(() -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e);
             }
         });
     }

@@ -1581,9 +1570,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
             transportChannel.sendResponse(e);
         } catch (IOException inner) {
             inner.addSuppressed(e);
-            logger.warn(
-                (Supplier<?>) () -> new ParameterizedMessage(
-                    "Failed to send error message back to client for action [{}]", action), inner);
+            logger.warn(() -> new ParameterizedMessage("Failed to send error message back to client for action [{}]", action), inner);
         }
     }
     return action;

@@ -1629,8 +1616,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
             transportChannel.sendResponse(e);
         } catch (Exception inner) {
             inner.addSuppressed(e);
-            logger.warn(
-                (Supplier<?>) () -> new ParameterizedMessage(
+            logger.warn(() -> new ParameterizedMessage(
                 "Failed to send error message back to client for action [{}]", reg.getAction()), inner);
         }
     }

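The commit message also mentions a parameterized-message test meant to catch breaking changes around lambdas in Log4j. As a rough sketch of the kind of invariant such a test pins down (an illustrative stand-alone check, not the test added by this commit):

import org.apache.logging.log4j.message.ParameterizedMessage;

public class ParameterizedMessageCheck {
    public static void main(String[] args) {
        // Each {} placeholder should be substituted with its argument in order,
        // exactly as the lambda-built messages throughout this diff rely on.
        ParameterizedMessage message = new ParameterizedMessage("[{}][{}] failed to snapshot shard", "index-1", "snap-1");
        String formatted = message.getFormattedMessage();
        if (!"[index-1][snap-1] failed to snapshot shard".equals(formatted)) {
            throw new AssertionError("unexpected formatting: " + formatted);
        }
    }
}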