Logging: Drop Settings from some logger lookups (#33859)

Drops `Settings` from some of the methods used to look up loggers and
deprecates another logger lookup that takes `Settings`, because
`Settings` is no longer required to build a logger.
Nik Everett 2018-09-20 10:42:48 -04:00 committed by GitHub
parent e37e5dfc04
commit f963c29876
21 changed files with 86 additions and 80 deletions
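
For illustration only (not part of the diff), a minimal sketch of the logger-lookup migration this commit enables; `Foo` and the in-scope `settings` value are hypothetical stand-ins:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;

// Before: Settings was threaded through solely to build a logger (this lookup is now deprecated).
Logger oldStyle = Loggers.getLogger(Foo.class, settings);

// After: Settings is not needed.
Logger viaLogManager = LogManager.getLogger(Foo.class);        // plain Log4j lookup
Logger withPrefix = Loggers.getLogger(Foo.class, "a-prefix");   // new Settings-free overload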


@ -36,7 +36,6 @@ import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.ParentTaskAssigningClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
@ -104,7 +103,6 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
private final ActionListener<BulkByScrollResponse> listener;
private final Retry bulkRetry;
private final ScrollableHitSource scrollSource;
private final Settings settings;
/**
* This BiFunction is used to apply various changes depending of the Reindex action and the search hit,
@ -113,15 +111,9 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
*/
private final BiFunction<RequestWrapper<?>, ScrollableHitSource.Hit, RequestWrapper<?>> scriptApplier;
public AbstractAsyncBulkByScrollAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client,
ThreadPool threadPool, Request mainRequest, ScriptService scriptService,
ClusterState clusterState, ActionListener<BulkByScrollResponse> listener) {
this(task, logger, client, threadPool, mainRequest, scriptService, clusterState, listener, client.settings());
}
public AbstractAsyncBulkByScrollAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client,
ThreadPool threadPool, Request mainRequest, ScriptService scriptService, ClusterState clusterState,
ActionListener<BulkByScrollResponse> listener, Settings settings) {
ActionListener<BulkByScrollResponse> listener) {
this.task = task;
if (!task.isWorker()) {
@ -131,7 +123,6 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
this.logger = logger;
this.client = client;
this.settings = settings;
this.threadPool = threadPool;
this.scriptService = scriptService;
this.clusterState = clusterState;
@ -357,7 +348,7 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
public void onFailure(Exception e) {
finishHim(e);
}
}, settings);
});
}
/**


@ -256,16 +256,10 @@ public class TransportReindexAction extends HandledTransportAction<ReindexReques
*/
private List<Thread> createdThreads = emptyList();
AsyncIndexBySearchAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client,
ThreadPool threadPool, ReindexRequest request, ScriptService scriptService, ClusterState clusterState,
ActionListener<BulkByScrollResponse> listener) {
this(task, logger, client, threadPool, request, scriptService, clusterState, listener, client.settings());
}
AsyncIndexBySearchAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client,
ThreadPool threadPool, ReindexRequest request, ScriptService scriptService, ClusterState clusterState,
ActionListener<BulkByScrollResponse> listener, Settings settings) {
super(task, logger, client, threadPool, request, scriptService, clusterState, listener, settings);
ActionListener<BulkByScrollResponse> listener) {
super(task, logger, client, threadPool, request, scriptService, clusterState, listener);
}
@Override


@ -82,16 +82,10 @@ public class TransportUpdateByQueryAction extends HandledTransportAction<UpdateB
* Simple implementation of update-by-query using scrolling and bulk.
*/
static class AsyncIndexBySearchAction extends AbstractAsyncBulkByScrollAction<UpdateByQueryRequest> {
AsyncIndexBySearchAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client,
ThreadPool threadPool, UpdateByQueryRequest request, ScriptService scriptService, ClusterState clusterState,
ActionListener<BulkByScrollResponse> listener) {
this(task, logger, client, threadPool, request, scriptService, clusterState, listener, client.settings());
}
AsyncIndexBySearchAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client,
ThreadPool threadPool, UpdateByQueryRequest request, ScriptService scriptService, ClusterState clusterState,
ActionListener<BulkByScrollResponse> listener, Settings settings) {
super(task, logger, client, threadPool, request, scriptService, clusterState, listener, settings);
ActionListener<BulkByScrollResponse> listener) {
super(task, logger, client, threadPool, request, scriptService, clusterState, listener);
}
@Override


@ -672,7 +672,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
private class DummyAsyncBulkByScrollAction extends AbstractAsyncBulkByScrollAction<DummyAbstractBulkByScrollRequest> {
DummyAsyncBulkByScrollAction() {
super(testTask, AsyncBulkByScrollActionTests.this.logger, new ParentTaskAssigningClient(client, localNode, testTask),
client.threadPool(), testRequest, null, null, listener, Settings.EMPTY);
client.threadPool(), testRequest, null, null, listener);
}
@Override


@ -21,7 +21,6 @@ package org.elasticsearch.index.reindex;
import org.elasticsearch.index.reindex.ScrollableHitSource.Hit;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.settings.Settings;
/**
* Index-by-search test for ttl, timestamp, and routing.
@ -78,7 +77,7 @@ public class ReindexMetadataTests extends AbstractAsyncBulkByScrollActionMetadat
private class TestAction extends TransportReindexAction.AsyncIndexBySearchAction {
TestAction() {
super(ReindexMetadataTests.this.task, ReindexMetadataTests.this.logger, null, ReindexMetadataTests.this.threadPool, request(),
null, null, listener(), Settings.EMPTY);
null, null, listener());
}
public ReindexRequest mainRequest() {


@ -21,7 +21,6 @@ package org.elasticsearch.index.reindex;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.script.ScriptService;
import java.util.Map;
@ -104,7 +103,7 @@ public class ReindexScriptTests extends AbstractAsyncBulkByScrollActionScriptTes
@Override
protected TransportReindexAction.AsyncIndexBySearchAction action(ScriptService scriptService, ReindexRequest request) {
return new TransportReindexAction.AsyncIndexBySearchAction(task, logger, null, threadPool, request, scriptService, null,
listener(), Settings.EMPTY);
return new TransportReindexAction.AsyncIndexBySearchAction(task, logger, null, threadPool, request, scriptService,
null, listener());
}
}


@ -188,7 +188,7 @@ public class RetryTests extends ESIntegTestCase {
}
Retry retry = new Retry(BackoffPolicy.exponentialBackoff(), client().threadPool());
BulkResponse initialBulkResponse = retry.withBackoff(client()::bulk, bulk.request(), client().settings()).actionGet();
BulkResponse initialBulkResponse = retry.withBackoff(client()::bulk, bulk.request()).actionGet();
assertFalse(initialBulkResponse.buildFailureMessage(), initialBulkResponse.hasFailures());
client().admin().indices().prepareRefresh("source").get();


@ -21,7 +21,6 @@ package org.elasticsearch.index.reindex;
import org.elasticsearch.index.reindex.ScrollableHitSource.Hit;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.settings.Settings;
public class UpdateByQueryMetadataTests
extends AbstractAsyncBulkByScrollActionMetadataTestCase<UpdateByQueryRequest, BulkByScrollResponse> {
@ -44,8 +43,7 @@ public class UpdateByQueryMetadataTests
private class TestAction extends TransportUpdateByQueryAction.AsyncIndexBySearchAction {
TestAction() {
super(UpdateByQueryMetadataTests.this.task, UpdateByQueryMetadataTests.this.logger, null,
UpdateByQueryMetadataTests.this.threadPool, request(), null, null, listener(),
Settings.EMPTY);
UpdateByQueryMetadataTests.this.threadPool, request(), null, null, listener());
}
@Override


@ -19,7 +19,6 @@
package org.elasticsearch.index.reindex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.script.ScriptService;
import java.util.Date;
@ -54,7 +53,7 @@ public class UpdateByQueryWithScriptTests
@Override
protected TransportUpdateByQueryAction.AsyncIndexBySearchAction action(ScriptService scriptService, UpdateByQueryRequest request) {
return new TransportUpdateByQueryAction.AsyncIndexBySearchAction(task, logger, null, threadPool, request, scriptService, null,
listener(), Settings.EMPTY);
return new TransportUpdateByQueryAction.AsyncIndexBySearchAction(task, logger, null, threadPool, request, scriptService,
null, listener());
}
}


@ -32,9 +32,9 @@ import io.netty.handler.codec.http.HttpRequestEncoder;
import io.netty.handler.codec.http.HttpResponse;
import io.netty.handler.codec.http.HttpResponseDecoder;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
@ -86,11 +86,11 @@ class NioHttpClient implements Closeable {
return list;
}
private static final Logger logger = LogManager.getLogger(NioHttpClient.class);
private final NioGroup nioGroup;
private final Logger logger;
NioHttpClient() {
logger = Loggers.getLogger(NioHttpClient.class, Settings.EMPTY);
try {
nioGroup = new NioGroup(daemonThreadFactory(Settings.EMPTY, "nio-http-client"), 1,
(s) -> new EventHandler(this::onException, s));


@ -22,7 +22,6 @@ import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.Scheduler;
import java.util.concurrent.CountDownLatch;
@ -80,7 +79,7 @@ public final class BulkRequestHandler {
latch.countDown();
}
}
}, Settings.EMPTY);
});
bulkRequestSetupSuccessful = true;
if (concurrentRequests == 0) {
latch.await();


@ -19,9 +19,9 @@
package org.elasticsearch.action.bulk;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.FutureUtils;
@ -54,14 +54,43 @@ public class Retry {
* @param consumer The consumer to which apply the request and listener
* @param bulkRequest The bulk request that should be executed.
* @param listener A listener that is invoked when the bulk request finishes or completes with an exception. The listener is not
* @param settings settings
*/
public void withBackoff(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, BulkRequest bulkRequest,
ActionListener<BulkResponse> listener, Settings settings) {
RetryHandler r = new RetryHandler(backoffPolicy, consumer, listener, settings, scheduler);
ActionListener<BulkResponse> listener) {
RetryHandler r = new RetryHandler(backoffPolicy, consumer, listener, scheduler);
r.execute(bulkRequest);
}
/**
* Invokes #accept(BulkRequest, ActionListener). Backs off on the provided exception and delegates results to the
* provided listener. Retries will be scheduled using the class's thread pool.
* @param consumer The consumer to which apply the request and listener
* @param bulkRequest The bulk request that should be executed.
* @param listener A listener that is invoked when the bulk request finishes or completes with an exception. The listener is not
* @param settings settings
* @deprecated Prefer {@link #withBackoff(BiConsumer, BulkRequest, ActionListener)}. The {@link Settings} isn't used.
*/
@Deprecated
public void withBackoff(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, BulkRequest bulkRequest,
ActionListener<BulkResponse> listener, Settings settings) {
withBackoff(consumer, bulkRequest, listener);
}
/**
* Invokes #accept(BulkRequest, ActionListener). Backs off on the provided exception. Retries will be scheduled using
* the class's thread pool.
*
* @param consumer The consumer to which apply the request and listener
* @param bulkRequest The bulk request that should be executed.
* @return a future representing the bulk response returned by the client.
*/
public PlainActionFuture<BulkResponse> withBackoff(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer,
BulkRequest bulkRequest) {
PlainActionFuture<BulkResponse> future = PlainActionFuture.newFuture();
withBackoff(consumer, bulkRequest, future);
return future;
}
/**
* Invokes #accept(BulkRequest, ActionListener). Backs off on the provided exception. Retries will be scheduled using
* the class's thread pool.
@ -70,18 +99,18 @@ public class Retry {
* @param bulkRequest The bulk request that should be executed.
* @param settings settings
* @return a future representing the bulk response returned by the client.
* @deprecated prefer {@link #withBackoff(BiConsumer, BulkRequest)}. The {@link Settings} isn't used.
*/
@Deprecated
public PlainActionFuture<BulkResponse> withBackoff(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer,
BulkRequest bulkRequest, Settings settings) {
PlainActionFuture<BulkResponse> future = PlainActionFuture.newFuture();
withBackoff(consumer, bulkRequest, future, settings);
return future;
return withBackoff(consumer, bulkRequest);
}
static class RetryHandler implements ActionListener<BulkResponse> {
private static final RestStatus RETRY_STATUS = RestStatus.TOO_MANY_REQUESTS;
private static final Logger logger = LogManager.getLogger(RetryHandler.class);
private final Logger logger;
private final Scheduler scheduler;
private final BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer;
private final ActionListener<BulkResponse> listener;
@ -95,11 +124,10 @@ public class Retry {
private volatile ScheduledFuture<?> scheduledRequestFuture;
RetryHandler(BackoffPolicy backoffPolicy, BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer,
ActionListener<BulkResponse> listener, Settings settings, Scheduler scheduler) {
ActionListener<BulkResponse> listener, Scheduler scheduler) {
this.backoff = backoffPolicy.iterator();
this.consumer = consumer;
this.listener = listener;
this.logger = Loggers.getLogger(getClass(), settings);
this.scheduler = scheduler;
// in contrast to System.currentTimeMillis(), nanoTime() uses a monotonic clock under the hood
this.startTimestampNanos = System.nanoTime();
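
For context (not part of the commit), a usage sketch of the reworked `Retry` entry points; it assumes a `Client client` and a `BulkRequest bulkRequest` are already in scope, mirroring the test changes later in this diff:

Retry retry = new Retry(BackoffPolicy.exponentialBackoff(), client.threadPool());

// New overload: no Settings argument; returns a future over the bulk response.
BulkResponse response = retry.withBackoff(client::bulk, bulkRequest).actionGet();

// Deprecated overload: still compiles, but the Settings argument is now ignored.
BulkResponse viaDeprecated = retry.withBackoff(client::bulk, bulkRequest, client.settings()).actionGet();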


@ -62,13 +62,23 @@ public class Loggers {
}
public static Logger getLogger(Class<?> clazz, Index index, String... prefixes) {
return getLogger(clazz, Settings.EMPTY, asArrayList(Loggers.SPACE, index.getName(), prefixes).toArray(new String[0]));
return getLogger(clazz, asArrayList(Loggers.SPACE, index.getName(), prefixes).toArray(new String[0]));
}
/**
* Get a logger.
* @deprecated prefer {@link #getLogger(Class, String...)} or {@link LogManager#getLogger}
* as the Settings is no longer needed
*/
@Deprecated
public static Logger getLogger(Class<?> clazz, Settings settings, String... prefixes) {
return ESLoggerFactory.getLogger(formatPrefix(prefixes), clazz);
}
public static Logger getLogger(Class<?> clazz, String... prefixes) {
return ESLoggerFactory.getLogger(formatPrefix(prefixes), clazz);
}
public static Logger getLogger(Logger parentLogger, String s) {
String prefix = null;
if (parentLogger instanceof PrefixLogger) {


@ -20,10 +20,10 @@
package org.elasticsearch.common.settings;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Binder;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
@ -43,12 +43,12 @@ import java.util.stream.IntStream;
* A module that binds the provided settings to the {@link Settings} interface.
*/
public class SettingsModule implements Module {
private static final Logger logger = LogManager.getLogger(SettingsModule.class);
private final Settings settings;
private final Set<String> settingsFilterPattern = new HashSet<>();
private final Map<String, Setting<?>> nodeSettings = new HashMap<>();
private final Map<String, Setting<?>> indexSettings = new HashMap<>();
private final Logger logger;
private final IndexScopedSettings indexScopedSettings;
private final ClusterSettings clusterSettings;
private final SettingsFilter settingsFilter;
@ -62,7 +62,6 @@ public class SettingsModule implements Module {
List<Setting<?>> additionalSettings,
List<String> settingsFilter,
Set<SettingUpgrader<?>> settingUpgraders) {
logger = Loggers.getLogger(getClass(), settings);
this.settings = settings;
for (Setting<?> setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) {
registerSetting(setting);


@ -19,13 +19,14 @@
package org.elasticsearch.discovery;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.service.ClusterApplier;
import org.elasticsearch.cluster.service.MasterService;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
@ -60,6 +61,7 @@ import java.util.stream.Collectors;
* A module for loading classes for node discovery.
*/
public class DiscoveryModule {
private static final Logger logger = LogManager.getLogger(DiscoveryModule.class);
public static final Setting<String> DISCOVERY_TYPE_SETTING =
new Setting<>("discovery.type", "zen", Function.identity(), Property.NodeScope);
@ -131,7 +133,7 @@ public class DiscoveryModule {
if (discoverySupplier == null) {
throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]");
}
Loggers.getLogger(getClass(), settings).info("using discovery type [{}] and host providers {}", discoveryType, hostsProviderNames);
logger.info("using discovery type [{}] and host providers {}", discoveryType, hostsProviderNames);
discovery = Objects.requireNonNull(discoverySupplier.get());
}


@ -19,13 +19,13 @@
package org.elasticsearch.transport;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.CheckedBiConsumer;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractLifecycleRunnable;
@ -53,10 +53,10 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
* the connection when the connection manager is closed.
*/
public class ConnectionManager implements Closeable {
private static final Logger logger = LogManager.getLogger(ConnectionManager.class);
private final ConcurrentMap<DiscoveryNode, Transport.Connection> connectedNodes = ConcurrentCollections.newConcurrentMap();
private final KeyedLock<String> connectionLock = new KeyedLock<>();
private final Logger logger;
private final Transport transport;
private final ThreadPool threadPool;
private final TimeValue pingSchedule;
@ -71,7 +71,6 @@ public class ConnectionManager implements Closeable {
}
public ConnectionManager(Settings settings, Transport transport, ThreadPool threadPool, ConnectionProfile defaultProfile) {
this.logger = Loggers.getLogger(getClass(), settings);
this.transport = transport;
this.threadPool = threadPool;
this.pingSchedule = TcpTransport.PING_SCHEDULE.get(settings);


@ -222,7 +222,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
this.compress = Transport.TRANSPORT_TCP_COMPRESS.get(settings);
this.networkService = networkService;
this.transportName = transportName;
this.transportLogger = new TransportLogger(settings);
this.transportLogger = new TransportLogger();
final Settings defaultFeatures = DEFAULT_FEATURES_SETTING.get(settings);
if (defaultFeatures == null) {


@ -19,13 +19,13 @@
package org.elasticsearch.transport;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.elasticsearch.Version;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.Compressor;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.compress.NotCompressedException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.core.internal.io.IOUtils;
@ -34,13 +34,9 @@ import java.io.IOException;
public final class TransportLogger {
private final Logger logger;
private static final Logger logger = LogManager.getLogger(TransportLogger.class);
private static final int HEADER_SIZE = TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE;
TransportLogger(Settings settings) {
logger = Loggers.getLogger(TransportLogger.class, settings);
}
void logInboundMessage(TcpChannel channel, BytesReference message) {
if (logger.isTraceEnabled()) {
try {


@ -85,7 +85,7 @@ public class RetryTests extends ESTestCase {
BulkRequest bulkRequest = createBulkRequest();
BulkResponse response = new Retry(backoff, bulkClient.threadPool())
.withBackoff(bulkClient::bulk, bulkRequest, bulkClient.settings())
.withBackoff(bulkClient::bulk, bulkRequest)
.actionGet();
assertFalse(response.hasFailures());
@ -97,7 +97,7 @@ public class RetryTests extends ESTestCase {
BulkRequest bulkRequest = createBulkRequest();
BulkResponse response = new Retry(backoff, bulkClient.threadPool())
.withBackoff(bulkClient::bulk, bulkRequest, bulkClient.settings())
.withBackoff(bulkClient::bulk, bulkRequest)
.actionGet();
assertTrue(response.hasFailures());
@ -110,7 +110,7 @@ public class RetryTests extends ESTestCase {
BulkRequest bulkRequest = createBulkRequest();
Retry retry = new Retry(backoff, bulkClient.threadPool());
retry.withBackoff(bulkClient::bulk, bulkRequest, listener, bulkClient.settings());
retry.withBackoff(bulkClient::bulk, bulkRequest, listener);
listener.awaitCallbacksCalled();
listener.assertOnResponseCalled();
@ -125,7 +125,7 @@ public class RetryTests extends ESTestCase {
BulkRequest bulkRequest = createBulkRequest();
Retry retry = new Retry(backoff, bulkClient.threadPool());
retry.withBackoff(bulkClient::bulk, bulkRequest, listener, bulkClient.settings());
retry.withBackoff(bulkClient::bulk, bulkRequest, listener);
listener.awaitCallbacksCalled();


@ -20,10 +20,10 @@
package org.elasticsearch.cluster.metadata;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@ -54,16 +54,15 @@ public class TemplateUpgradeServiceIT extends ESIntegTestCase {
return Collections.singletonList(TestPlugin.class);
}
public static class TestPlugin extends Plugin {
public static final class TestPlugin extends Plugin {
// This setting is used to simulate cluster state updates
static final Setting<Integer> UPDATE_TEMPLATE_DUMMY_SETTING =
Setting.intSetting("tests.update_template_count", 0, Setting.Property.NodeScope, Setting.Property.Dynamic);
private static final Logger logger = LogManager.getLogger(TestPlugin.class);
protected final Logger logger;
protected final Settings settings;
public TestPlugin(Settings settings) {
this.logger = Loggers.getLogger(getClass(), settings);
this.settings = settings;
}


@ -55,7 +55,7 @@ public class TransportLoggerTests extends ESTestCase {
}
public void testLoggingHandler() throws IOException {
TransportLogger transportLogger = new TransportLogger(Settings.EMPTY);
TransportLogger transportLogger = new TransportLogger();
final String writePattern =
".*\\[length: \\d+" +