From 8ae2049889766e4c8cbe67bd3f0d1d9998c542a5 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Fri, 22 Jun 2018 13:46:48 +0200 Subject: [PATCH 1/8] Avoid deprecation warning when running the ML datafeed extractor. (#31463) In #29639 we added a `format` option to doc-value fields and deprecated usage of doc-value fields without a format so that we could migrate doc-value fields to use the format that comes with the mappings by default. However I missed to fix the machine-learning datafeed extractor. --- .../ml/datafeed/extractor/scroll/ExtractedField.java | 8 +++++++- .../ml/datafeed/extractor/scroll/ScrollDataExtractor.java | 8 +++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ExtractedField.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ExtractedField.java index c2d866563d6..ef0dffa2691 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ExtractedField.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ExtractedField.java @@ -103,7 +103,13 @@ abstract class ExtractedField { if (value.length != 1) { return value; } - value[0] = ((BaseDateTime) value[0]).getMillis(); + if (value[0] instanceof String) { // doc_value field with the epoch_millis format + value[0] = Long.parseLong((String) value[0]); + } else if (value[0] instanceof BaseDateTime) { // script field + value[0] = ((BaseDateTime) value[0]).getMillis(); + } else { + throw new IllegalStateException("Unexpected value for a time field: " + value[0].getClass()); + } return value; } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java index bbd9f9ad533..57681a0aafb 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.script.Script; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.fetch.StoredFieldsContext; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; @@ -47,6 +48,7 @@ class ScrollDataExtractor implements DataExtractor { private static final Logger LOGGER = Loggers.getLogger(ScrollDataExtractor.class); private static final TimeValue SCROLL_TIMEOUT = new TimeValue(30, TimeUnit.MINUTES); + private static final String EPOCH_MILLIS_FORMAT = "epoch_millis"; private final Client client; private final ScrollDataExtractorContext context; @@ -115,7 +117,11 @@ class ScrollDataExtractor implements DataExtractor { context.query, context.extractedFields.timeField(), start, context.end)); for (String docValueField : context.extractedFields.getDocValueFields()) { - searchRequestBuilder.addDocValueField(docValueField); + if (docValueField.equals(context.extractedFields.timeField())) { + searchRequestBuilder.addDocValueField(docValueField, EPOCH_MILLIS_FORMAT); + } else { + searchRequestBuilder.addDocValueField(docValueField, DocValueFieldsContext.USE_DEFAULT_FORMAT); + } } String[] sourceFields = context.extractedFields.getSourceFields(); if (sourceFields.length == 0) { From f22f91c57a6199ddadb19b2bf839d3ac7c3e2fbd Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Fri, 22 Jun 2018 15:31:23 +0200 Subject: [PATCH 2/8] Allow multiple unicast host providers (#31509) Introduces support for multiple host providers, which allows the settings based hosts resolver to be 
treated just as any other UnicastHostsProvider. Also introduces the notion of a HostsResolver so that plugins such as FileBasedDiscovery do not need to create their own thread pool for resolving hosts, making it easier to add new similar kind of plugins. --- .../classic/AzureUnicastHostsProvider.java | 2 +- .../ec2/AwsEc2UnicastHostsProvider.java | 2 +- .../discovery/ec2/Ec2DiscoveryTests.java | 8 +- .../file/FileBasedDiscoveryPlugin.java | 48 +----------- .../file/FileBasedUnicastHostsProvider.java | 34 +-------- .../FileBasedUnicastHostsProviderTests.java | 12 ++- .../gce/GceUnicastHostsProvider.java | 2 +- .../discovery/gce/GceDiscoveryTests.java | 2 +- .../common/settings/ClusterSettings.java | 3 +- .../discovery/DiscoveryModule.java | 47 ++++++++---- .../zen/SettingsBasedHostsProvider.java | 75 +++++++++++++++++++ .../discovery/zen/UnicastHostsProvider.java | 12 ++- .../discovery/zen/UnicastZenPing.java | 64 ++++++---------- .../discovery/DiscoveryModuleTests.java | 36 ++++++++- .../single/SingleNodeDiscoveryIT.java | 2 +- .../discovery/zen/UnicastZenPingTests.java | 45 ++++++----- .../discovery/zen/ZenDiscoveryUnitTests.java | 2 +- .../discovery/MockUncasedHostProvider.java | 2 +- .../test/discovery/TestZenDiscovery.java | 4 +- 19 files changed, 224 insertions(+), 178 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProvider.java diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java index 482dafb008f..1a9265de2a7 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java @@ -132,7 +132,7 @@ public class 
AzureUnicastHostsProvider extends AbstractComponent implements Unic * Setting `cloud.azure.refresh_interval` to `0` will disable caching (default). */ @Override - public List buildDynamicHosts() { + public List buildDynamicHosts(HostsResolver hostsResolver) { if (refreshInterval.millis() != 0) { if (dynamicHosts != null && (refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) { diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java index 396e9f707d4..8f503704298 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java @@ -92,7 +92,7 @@ class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHos } @Override - public List buildDynamicHosts() { + public List buildDynamicHosts(HostsResolver hostsResolver) { return dynamicHosts.getOrRefresh(); } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java index 9dc2e02edc1..295df0c818a 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java @@ -93,7 +93,7 @@ public class Ec2DiscoveryTests extends ESTestCase { protected List buildDynamicHosts(Settings nodeSettings, int nodes, List> tagsList) { try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(Settings.EMPTY, nodes, tagsList)) { AwsEc2UnicastHostsProvider provider = new AwsEc2UnicastHostsProvider(nodeSettings, transportService, plugin.ec2Service); - List dynamicHosts = 
provider.buildDynamicHosts(); + List dynamicHosts = provider.buildDynamicHosts(null); logger.debug("--> addresses found: {}", dynamicHosts); return dynamicHosts; } catch (IOException e) { @@ -307,7 +307,7 @@ public class Ec2DiscoveryTests extends ESTestCase { } }; for (int i=0; i<3; i++) { - provider.buildDynamicHosts(); + provider.buildDynamicHosts(null); } assertThat(provider.fetchCount, is(3)); } @@ -324,12 +324,12 @@ public class Ec2DiscoveryTests extends ESTestCase { } }; for (int i=0; i<3; i++) { - provider.buildDynamicHosts(); + provider.buildDynamicHosts(null); } assertThat(provider.fetchCount, is(1)); Thread.sleep(1_000L); // wait for cache to expire for (int i=0; i<3; i++) { - provider.buildDynamicHosts(); + provider.buildDynamicHosts(null); } assertThat(provider.fetchCount, is(2)); } diff --git a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java index fb37b3bc011..4d264470785 100644 --- a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java +++ b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java @@ -19,35 +19,17 @@ package org.elasticsearch.discovery.file; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.discovery.zen.UnicastHostsProvider; -import org.elasticsearch.discovery.zen.UnicastZenPing; import org.elasticsearch.env.Environment; -import 
org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.node.Node; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.watcher.ResourceWatcherService; -import java.io.IOException; import java.nio.file.Path; -import java.util.Collection; import java.util.Collections; import java.util.Map; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; import java.util.function.Supplier; /** @@ -57,47 +39,19 @@ import java.util.function.Supplier; */ public class FileBasedDiscoveryPlugin extends Plugin implements DiscoveryPlugin { - private static final Logger logger = Loggers.getLogger(FileBasedDiscoveryPlugin.class); - private final Settings settings; private final Path configPath; - private ExecutorService fileBasedDiscoveryExecutorService; public FileBasedDiscoveryPlugin(Settings settings, Path configPath) { this.settings = settings; this.configPath = configPath; } - @Override - public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, - ResourceWatcherService resourceWatcherService, ScriptService scriptService, - NamedXContentRegistry xContentRegistry, Environment environment, - NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { - final int concurrentConnects = UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings); - final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings, "[file_based_discovery_resolve]"); - fileBasedDiscoveryExecutorService = EsExecutors.newScaling( - Node.NODE_NAME_SETTING.get(settings) + "/" + "file_based_discovery_resolve", - 0, - concurrentConnects, - 60, - TimeUnit.SECONDS, - threadFactory, - threadPool.getThreadContext()); - - 
return Collections.emptyList(); - } - - @Override - public void close() throws IOException { - ThreadPool.terminate(fileBasedDiscoveryExecutorService, 0, TimeUnit.SECONDS); - } - @Override public Map> getZenHostsProviders(TransportService transportService, NetworkService networkService) { return Collections.singletonMap( "file", - () -> new FileBasedUnicastHostsProvider( - new Environment(settings, configPath), transportService, fileBasedDiscoveryExecutorService)); + () -> new FileBasedUnicastHostsProvider(new Environment(settings, configPath))); } } diff --git a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java index 7abcb445472..584ae4de5a2 100644 --- a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java +++ b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java @@ -23,26 +23,19 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.env.Environment; -import org.elasticsearch.transport.TransportService; import java.io.FileNotFoundException; import java.io.IOException; import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; -import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.concurrent.ExecutorService; import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.elasticsearch.discovery.zen.UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT; -import static 
org.elasticsearch.discovery.zen.UnicastZenPing.resolveHostsLists; - /** * An implementation of {@link UnicastHostsProvider} that reads hosts/ports * from {@link #UNICAST_HOSTS_FILE}. @@ -59,23 +52,15 @@ class FileBasedUnicastHostsProvider extends AbstractComponent implements Unicast static final String UNICAST_HOSTS_FILE = "unicast_hosts.txt"; - private final TransportService transportService; - private final ExecutorService executorService; - private final Path unicastHostsFilePath; - private final TimeValue resolveTimeout; - - FileBasedUnicastHostsProvider(Environment environment, TransportService transportService, ExecutorService executorService) { + FileBasedUnicastHostsProvider(Environment environment) { super(environment.settings()); - this.transportService = transportService; - this.executorService = executorService; this.unicastHostsFilePath = environment.configFile().resolve("discovery-file").resolve(UNICAST_HOSTS_FILE); - this.resolveTimeout = DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT.get(settings); } @Override - public List buildDynamicHosts() { + public List buildDynamicHosts(HostsResolver hostsResolver) { List hostsList; try (Stream lines = Files.lines(unicastHostsFilePath)) { hostsList = lines.filter(line -> line.startsWith("#") == false) // lines starting with `#` are comments @@ -90,21 +75,8 @@ class FileBasedUnicastHostsProvider extends AbstractComponent implements Unicast hostsList = Collections.emptyList(); } - final List dynamicHosts = new ArrayList<>(); - try { - dynamicHosts.addAll(resolveHostsLists( - executorService, - logger, - hostsList, - 1, - transportService, - resolveTimeout)); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - + final List dynamicHosts = hostsResolver.resolveHosts(hostsList, 1); logger.debug("[discovery-file] Using dynamic discovery nodes {}", dynamicHosts); - return dynamicHosts; } diff --git 
a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java index 860d3537635..5837d3bcdfe 100644 --- a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java +++ b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java @@ -24,7 +24,9 @@ import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.discovery.zen.UnicastZenPing; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -123,8 +125,10 @@ public class FileBasedUnicastHostsProviderTests extends ESTestCase { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .build(); final Environment environment = TestEnvironment.newEnvironment(settings); - final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(environment, transportService, executorService); - final List addresses = provider.buildDynamicHosts(); + final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(environment); + final List addresses = provider.buildDynamicHosts((hosts, limitPortCounts) -> + UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, + TimeValue.timeValueSeconds(10))); assertEquals(0, addresses.size()); } @@ -163,6 +167,8 @@ public class FileBasedUnicastHostsProviderTests extends ESTestCase { } return new FileBasedUnicastHostsProvider( - new Environment(settings, 
configPath), transportService, executorService).buildDynamicHosts(); + new Environment(settings, configPath)).buildDynamicHosts((hosts, limitPortCounts) -> + UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, + TimeValue.timeValueSeconds(10))); } } diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java index 790d70a8b99..778c38697c5 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java @@ -93,7 +93,7 @@ public class GceUnicastHostsProvider extends AbstractComponent implements Unicas * Information can be cached using `cloud.gce.refresh_interval` property if needed. */ @Override - public List buildDynamicHosts() { + public List buildDynamicHosts(HostsResolver hostsResolver) { // We check that needed properties have been set if (this.project == null || this.project.isEmpty() || this.zones == null || this.zones.isEmpty()) { throw new IllegalArgumentException("one or more gce discovery settings are missing. 
" + diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java index a1944a15d80..816152186e7 100644 --- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java @@ -108,7 +108,7 @@ public class GceDiscoveryTests extends ESTestCase { GceUnicastHostsProvider provider = new GceUnicastHostsProvider(nodeSettings, gceInstancesService, transportService, new NetworkService(Collections.emptyList())); - List dynamicHosts = provider.buildDynamicHosts(); + List dynamicHosts = provider.buildDynamicHosts(null); logger.info("--> addresses found: {}", dynamicHosts); return dynamicHosts; } diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index e616613a425..478325c66f9 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -56,6 +56,7 @@ import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.FaultDetection; +import org.elasticsearch.discovery.zen.SettingsBasedHostsProvider; import org.elasticsearch.discovery.zen.UnicastZenPing; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.env.Environment; @@ -357,7 +358,7 @@ public final class ClusterSettings extends AbstractScopedSettings { ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING, ZenDiscovery.MASTER_ELECTION_IGNORE_NON_MASTER_PINGS_SETTING, ZenDiscovery.MAX_PENDING_CLUSTER_STATES_SETTING, - UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING, + 
SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING, UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING, UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT, SearchService.DEFAULT_KEEPALIVE_SETTING, diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index 179692cd516..e47fe7a7a70 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -31,7 +31,9 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.discovery.single.SingleNodeDiscovery; +import org.elasticsearch.discovery.zen.SettingsBasedHostsProvider; import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.plugins.DiscoveryPlugin; @@ -42,13 +44,15 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Optional; +import java.util.Set; import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.Supplier; +import java.util.stream.Collectors; /** * A module for loading classes for node discovery. 
@@ -57,8 +61,8 @@ public class DiscoveryModule { public static final Setting DISCOVERY_TYPE_SETTING = new Setting<>("discovery.type", "zen", Function.identity(), Property.NodeScope); - public static final Setting> DISCOVERY_HOSTS_PROVIDER_SETTING = - new Setting<>("discovery.zen.hosts_provider", (String)null, Optional::ofNullable, Property.NodeScope); + public static final Setting> DISCOVERY_HOSTS_PROVIDER_SETTING = + Setting.listSetting("discovery.zen.hosts_provider", Collections.emptyList(), Function.identity(), Property.NodeScope); private final Discovery discovery; @@ -66,9 +70,9 @@ public class DiscoveryModule { NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService, MasterService masterService, ClusterApplier clusterApplier, ClusterSettings clusterSettings, List plugins, AllocationService allocationService) { - final UnicastHostsProvider hostsProvider; final Collection> joinValidators = new ArrayList<>(); - Map> hostProviders = new HashMap<>(); + final Map> hostProviders = new HashMap<>(); + hostProviders.put("settings", () -> new SettingsBasedHostsProvider(settings, transportService)); for (DiscoveryPlugin plugin : plugins) { plugin.getZenHostsProviders(transportService, networkService).entrySet().forEach(entry -> { if (hostProviders.put(entry.getKey(), entry.getValue()) != null) { @@ -80,17 +84,32 @@ public class DiscoveryModule { joinValidators.add(joinValidator); } } - Optional hostsProviderName = DISCOVERY_HOSTS_PROVIDER_SETTING.get(settings); - if (hostsProviderName.isPresent()) { - Supplier hostsProviderSupplier = hostProviders.get(hostsProviderName.get()); - if (hostsProviderSupplier == null) { - throw new IllegalArgumentException("Unknown zen hosts provider [" + hostsProviderName.get() + "]"); - } - hostsProvider = Objects.requireNonNull(hostsProviderSupplier.get()); - } else { - hostsProvider = Collections::emptyList; + List hostsProviderNames = DISCOVERY_HOSTS_PROVIDER_SETTING.get(settings); + // for bwc purposes, add settings 
provider even if not explicitly specified + if (hostsProviderNames.contains("settings") == false) { + List extendedHostsProviderNames = new ArrayList<>(); + extendedHostsProviderNames.add("settings"); + extendedHostsProviderNames.addAll(hostsProviderNames); + hostsProviderNames = extendedHostsProviderNames; } + final Set missingProviderNames = new HashSet<>(hostsProviderNames); + missingProviderNames.removeAll(hostProviders.keySet()); + if (missingProviderNames.isEmpty() == false) { + throw new IllegalArgumentException("Unknown zen hosts providers " + missingProviderNames); + } + + List filteredHostsProviders = hostsProviderNames.stream() + .map(hostProviders::get).map(Supplier::get).collect(Collectors.toList()); + + final UnicastHostsProvider hostsProvider = hostsResolver -> { + final List addresses = new ArrayList<>(); + for (UnicastHostsProvider provider : filteredHostsProviders) { + addresses.addAll(provider.buildDynamicHosts(hostsResolver)); + } + return Collections.unmodifiableList(addresses); + }; + Map> discoveryTypes = new HashMap<>(); discoveryTypes.put("zen", () -> new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier, diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProvider.java b/server/src/main/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProvider.java new file mode 100644 index 00000000000..6d6453c776e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProvider.java @@ -0,0 +1,75 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.discovery.zen; + +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.transport.TransportService; + +import java.util.List; +import java.util.function.Function; + +import static java.util.Collections.emptyList; + +/** + * An implementation of {@link UnicastHostsProvider} that reads hosts/ports + * from the "discovery.zen.ping.unicast.hosts" node setting. If the port is + * left off an entry, a default port of 9300 is assumed. 
+ * + * An example unicast hosts setting might look as follows: + * [67.81.244.10, 67.81.244.11:9305, 67.81.244.15:9400] + */ +public class SettingsBasedHostsProvider extends AbstractComponent implements UnicastHostsProvider { + + public static final Setting> DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING = + Setting.listSetting("discovery.zen.ping.unicast.hosts", emptyList(), Function.identity(), Setting.Property.NodeScope); + + // these limits are per-address + public static final int LIMIT_FOREIGN_PORTS_COUNT = 1; + public static final int LIMIT_LOCAL_PORTS_COUNT = 5; + + private final List configuredHosts; + + private final int limitPortCounts; + + public SettingsBasedHostsProvider(Settings settings, TransportService transportService) { + super(settings); + + if (DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.exists(settings)) { + configuredHosts = DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.get(settings); + // we only limit to 1 address, makes no sense to ping 100 ports + limitPortCounts = LIMIT_FOREIGN_PORTS_COUNT; + } else { + // if unicast hosts are not specified, fill with simple defaults on the local machine + configuredHosts = transportService.getLocalAddresses(); + limitPortCounts = LIMIT_LOCAL_PORTS_COUNT; + } + + logger.debug("using initial hosts {}", configuredHosts); + } + + @Override + public List buildDynamicHosts(HostsResolver hostsResolver) { + return hostsResolver.resolveHosts(configuredHosts, limitPortCounts); + } + +} diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastHostsProvider.java b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastHostsProvider.java index d719f9d123b..86410005c92 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastHostsProvider.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastHostsProvider.java @@ -31,5 +31,15 @@ public interface UnicastHostsProvider { /** * Builds the dynamic list of unicast hosts to be used for unicast discovery. 
*/ - List buildDynamicHosts(); + List buildDynamicHosts(HostsResolver hostsResolver); + + /** + * Helper object that allows to resolve a list of hosts to a list of transport addresses. + * Each host is resolved into a transport address (or a collection of addresses if the + * number of ports is greater than one) + */ + interface HostsResolver { + List resolveHosts(List hosts, int limitPortCounts); + } + } diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java index cbadbb4a1e0..9c86fa17e9b 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java @@ -82,11 +82,9 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; -import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; -import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; @@ -94,26 +92,15 @@ import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.new public class UnicastZenPing extends AbstractComponent implements ZenPing { public static final String ACTION_NAME = "internal:discovery/zen/unicast"; - public static final Setting> DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING = - Setting.listSetting("discovery.zen.ping.unicast.hosts", emptyList(), Function.identity(), - Property.NodeScope); public static final Setting DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING = Setting.intSetting("discovery.zen.ping.unicast.concurrent_connects", 10, 0, Property.NodeScope); public static final Setting DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT = 
Setting.positiveTimeSetting("discovery.zen.ping.unicast.hosts.resolve_timeout", TimeValue.timeValueSeconds(5), Property.NodeScope); - // these limits are per-address - public static final int LIMIT_FOREIGN_PORTS_COUNT = 1; - public static final int LIMIT_LOCAL_PORTS_COUNT = 5; - private final ThreadPool threadPool; private final TransportService transportService; private final ClusterName clusterName; - private final List configuredHosts; - - private final int limitPortCounts; - private final PingContextProvider contextProvider; private final AtomicInteger pingingRoundIdGenerator = new AtomicInteger(); @@ -141,19 +128,10 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { this.contextProvider = contextProvider; final int concurrentConnects = DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings); - if (DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.exists(settings)) { - configuredHosts = DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.get(settings); - // we only limit to 1 addresses, makes no sense to ping 100 ports - limitPortCounts = LIMIT_FOREIGN_PORTS_COUNT; - } else { - // if unicast hosts are not specified, fill with simple defaults on the local machine - configuredHosts = transportService.getLocalAddresses(); - limitPortCounts = LIMIT_LOCAL_PORTS_COUNT; - } + resolveTimeout = DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT.get(settings); logger.debug( - "using initial hosts {}, with concurrent_connects [{}], resolve_timeout [{}]", - configuredHosts, + "using concurrent_connects [{}], resolve_timeout [{}]", concurrentConnects, resolveTimeout); @@ -172,9 +150,9 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { } /** - * Resolves a list of hosts to a list of discovery nodes. Each host is resolved into a transport address (or a collection of addresses - * if the number of ports is greater than one) and the transport addresses are used to created discovery nodes. 
Host lookups are done - * in parallel using specified executor service up to the specified resolve timeout. + * Resolves a list of hosts to a list of transport addresses. Each host is resolved into a transport address (or a collection of + * addresses if the number of ports is greater than one). Host lookups are done in parallel using specified executor service up + * to the specified resolve timeout. * * @param executorService the executor service used to parallelize hostname lookups * @param logger logger used for logging messages regarding hostname lookups @@ -190,7 +168,7 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { final List hosts, final int limitPortCounts, final TransportService transportService, - final TimeValue resolveTimeout) throws InterruptedException { + final TimeValue resolveTimeout) { Objects.requireNonNull(executorService); Objects.requireNonNull(logger); Objects.requireNonNull(hosts); @@ -205,8 +183,13 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { .stream() .map(hn -> (Callable) () -> transportService.addressesFromString(hn, limitPortCounts)) .collect(Collectors.toList()); - final List> futures = - executorService.invokeAll(callables, resolveTimeout.nanos(), TimeUnit.NANOSECONDS); + final List> futures; + try { + futures = executorService.invokeAll(callables, resolveTimeout.nanos(), TimeUnit.NANOSECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return Collections.emptyList(); + } final List transportAddresses = new ArrayList<>(); final Set localAddresses = new HashSet<>(); localAddresses.add(transportService.boundAddress().publishAddress()); @@ -232,6 +215,9 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { assert e.getCause() != null; final String message = "failed to resolve host [" + hostname + "]"; logger.warn(message, e.getCause()); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + // 
ignore } } else { logger.warn("timed out after [{}] resolving host [{}]", resolveTimeout, hostname); @@ -240,6 +226,11 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { return Collections.unmodifiableList(transportAddresses); } + private UnicastHostsProvider.HostsResolver createHostsResolver() { + return (hosts, limitPortCounts) -> resolveHostsLists(unicastZenPingExecutorService, logger, hosts, + limitPortCounts, transportService, resolveTimeout); + } + @Override public void close() { ThreadPool.terminate(unicastZenPingExecutorService, 10, TimeUnit.SECONDS); @@ -281,18 +272,7 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { final TimeValue scheduleDuration, final TimeValue requestDuration) { final List seedAddresses = new ArrayList<>(); - try { - seedAddresses.addAll(resolveHostsLists( - unicastZenPingExecutorService, - logger, - configuredHosts, - limitPortCounts, - transportService, - resolveTimeout)); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - seedAddresses.addAll(hostsProvider.buildDynamicHosts()); + seedAddresses.addAll(hostsProvider.buildDynamicHosts(createHostsResolver())); final DiscoveryNodes nodes = contextProvider.clusterState().nodes(); // add all possible master nodes that were active in the last known cluster configuration for (ObjectCursor masterNode : nodes.getMasterNodes().values()) { diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java index 18829d51597..f2491b2db1f 100644 --- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java @@ -137,11 +137,10 @@ public class DiscoveryModuleTests extends ESTestCase { public void testHostsProvider() { Settings settings = Settings.builder().put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), 
"custom").build(); - final UnicastHostsProvider provider = Collections::emptyList; AtomicBoolean created = new AtomicBoolean(false); DummyHostsProviderPlugin plugin = () -> Collections.singletonMap("custom", () -> { created.set(true); - return Collections::emptyList; + return hostsResolver -> Collections.emptyList(); }); newModule(settings, Collections.singletonList(plugin)); assertTrue(created.get()); @@ -151,7 +150,7 @@ public class DiscoveryModuleTests extends ESTestCase { Settings settings = Settings.builder().put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "dne").build(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> newModule(settings, Collections.emptyList())); - assertEquals("Unknown zen hosts provider [dne]", e.getMessage()); + assertEquals("Unknown zen hosts providers [dne]", e.getMessage()); } public void testDuplicateHostsProvider() { @@ -162,6 +161,37 @@ public class DiscoveryModuleTests extends ESTestCase { assertEquals("Cannot register zen hosts provider [dup] twice", e.getMessage()); } + public void testSettingsHostsProvider() { + DummyHostsProviderPlugin plugin = () -> Collections.singletonMap("settings", () -> null); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + newModule(Settings.EMPTY, Arrays.asList(plugin))); + assertEquals("Cannot register zen hosts provider [settings] twice", e.getMessage()); + } + + public void testMultiHostsProvider() { + AtomicBoolean created1 = new AtomicBoolean(false); + DummyHostsProviderPlugin plugin1 = () -> Collections.singletonMap("provider1", () -> { + created1.set(true); + return hostsResolver -> Collections.emptyList(); + }); + AtomicBoolean created2 = new AtomicBoolean(false); + DummyHostsProviderPlugin plugin2 = () -> Collections.singletonMap("provider2", () -> { + created2.set(true); + return hostsResolver -> Collections.emptyList(); + }); + AtomicBoolean created3 = new AtomicBoolean(false); + DummyHostsProviderPlugin 
plugin3 = () -> Collections.singletonMap("provider3", () -> { + created3.set(true); + return hostsResolver -> Collections.emptyList(); + }); + Settings settings = Settings.builder().putList(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), + "provider1", "provider3").build(); + newModule(settings, Arrays.asList(plugin1, plugin2, plugin3)); + assertTrue(created1.get()); + assertFalse(created2.get()); + assertTrue(created3.get()); + } + public void testLazyConstructionHostsProvider() { DummyHostsProviderPlugin plugin = () -> Collections.singletonMap("custom", () -> { diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java index 33c87ea7f38..c3ffbb82081 100644 --- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java @@ -84,7 +84,7 @@ public class SingleNodeDiscoveryIT extends ESIntegTestCase { internalCluster().getInstance(TransportService.class); // try to ping the single node directly final UnicastHostsProvider provider = - () -> Collections.singletonList(nodeTransport.getLocalNode().getAddress()); + hostsResolver -> Collections.singletonList(nodeTransport.getLocalNode().getAddress()); final CountDownLatch latch = new CountDownLatch(1); final DiscoveryNodes nodes = DiscoveryNodes.builder() .add(nodeTransport.getLocalNode()) diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java index 4aa75077431..eef926a1e12 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java @@ -137,8 +137,6 @@ public class UnicastZenPingTests extends ESTestCase { } } - private static final UnicastHostsProvider 
EMPTY_HOSTS_PROVIDER = Collections::emptyList; - public void testSimplePings() throws IOException, InterruptedException, ExecutionException { // use ephemeral ports final Settings settings = Settings.builder().put("cluster.name", "test").put(TcpTransport.PORT.getKey(), 0).build(); @@ -182,7 +180,7 @@ public class UnicastZenPingTests extends ESTestCase { final ClusterState state = ClusterState.builder(new ClusterName("test")).version(randomNonNegativeLong()).build(); final ClusterState stateMismatch = ClusterState.builder(new ClusterName("mismatch")).version(randomNonNegativeLong()).build(); - Settings hostsSettings = Settings.builder() + final Settings hostsSettings = Settings.builder() .putList("discovery.zen.ping.unicast.hosts", NetworkAddress.format(new InetSocketAddress(handleA.address.address().getAddress(), handleA.address.address().getPort())), NetworkAddress.format(new InetSocketAddress(handleB.address.address().getAddress(), handleB.address.address().getPort())), @@ -196,22 +194,21 @@ public class UnicastZenPingTests extends ESTestCase { .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) .nodes(DiscoveryNodes.builder().add(handleA.node).localNodeId("UZP_A")) .build(); - TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER, () -> stateA); + TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, () -> stateA); zenPingA.start(); closeables.push(zenPingA); ClusterState stateB = ClusterState.builder(state) .nodes(DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B")) .build(); - TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER, () -> stateB); + TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, () -> stateB); zenPingB.start(); closeables.push(zenPingB); ClusterState stateC = ClusterState.builder(stateMismatch) 
.nodes(DiscoveryNodes.builder().add(handleC.node).localNodeId("UZP_C")) .build(); - TestUnicastZenPing zenPingC = new TestUnicastZenPing(hostsSettingsMismatch, threadPool, handleC, - EMPTY_HOSTS_PROVIDER, () -> stateC) { + TestUnicastZenPing zenPingC = new TestUnicastZenPing(hostsSettingsMismatch, threadPool, handleC, () -> stateC) { @Override protected Version getVersion() { return versionD; @@ -223,8 +220,7 @@ public class UnicastZenPingTests extends ESTestCase { ClusterState stateD = ClusterState.builder(stateMismatch) .nodes(DiscoveryNodes.builder().add(handleD.node).localNodeId("UZP_D")) .build(); - TestUnicastZenPing zenPingD = new TestUnicastZenPing(hostsSettingsMismatch, threadPool, handleD, - EMPTY_HOSTS_PROVIDER, () -> stateD); + TestUnicastZenPing zenPingD = new TestUnicastZenPing(hostsSettingsMismatch, threadPool, handleD, () -> stateD); zenPingD.start(); closeables.push(zenPingD); @@ -329,21 +325,21 @@ public class UnicastZenPingTests extends ESTestCase { .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) .nodes(DiscoveryNodes.builder().add(handleA.node).localNodeId("UZP_A")) .build(); - final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER, () -> stateA); + final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, () -> stateA); zenPingA.start(); closeables.push(zenPingA); ClusterState stateB = ClusterState.builder(state) .nodes(DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B")) .build(); - TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER, () -> stateB); + TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, () -> stateB); zenPingB.start(); closeables.push(zenPingB); ClusterState stateC = ClusterState.builder(state) .nodes(DiscoveryNodes.builder().add(handleC.node).localNodeId("UZP_C")) .build(); - TestUnicastZenPing 
zenPingC = new TestUnicastZenPing(hostsSettings, threadPool, handleC, EMPTY_HOSTS_PROVIDER, () -> stateC); + TestUnicastZenPing zenPingC = new TestUnicastZenPing(hostsSettings, threadPool, handleC, () -> stateC); zenPingC.start(); closeables.push(zenPingC); @@ -408,7 +404,7 @@ public class UnicastZenPingTests extends ESTestCase { Collections.emptySet()); closeables.push(transportService); final int limitPortCounts = randomIntBetween(1, 10); - final List transportAddresses = TestUnicastZenPing.resolveHostsLists( + final List transportAddresses = UnicastZenPing.resolveHostsLists( executorService, logger, Collections.singletonList("127.0.0.1"), @@ -452,7 +448,7 @@ public class UnicastZenPingTests extends ESTestCase { new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); closeables.push(transportService); - final List transportAddresses = TestUnicastZenPing.resolveHostsLists( + final List transportAddresses = UnicastZenPing.resolveHostsLists( executorService, logger, Collections.singletonList(NetworkAddress.format(loopbackAddress)), @@ -503,7 +499,7 @@ public class UnicastZenPingTests extends ESTestCase { Collections.emptySet()); closeables.push(transportService); - final List transportAddresses = TestUnicastZenPing.resolveHostsLists( + final List transportAddresses = UnicastZenPing.resolveHostsLists( executorService, logger, Arrays.asList(hostname), @@ -562,7 +558,7 @@ public class UnicastZenPingTests extends ESTestCase { closeables.push(transportService); final TimeValue resolveTimeout = TimeValue.timeValueSeconds(randomIntBetween(1, 3)); try { - final List transportAddresses = TestUnicastZenPing.resolveHostsLists( + final List transportAddresses = UnicastZenPing.resolveHostsLists( executorService, logger, Arrays.asList("hostname1", "hostname2"), @@ -610,6 +606,7 @@ public class UnicastZenPingTests extends ESTestCase { 
hostsSettingsBuilder.put("discovery.zen.ping.unicast.hosts", (String) null); } final Settings hostsSettings = hostsSettingsBuilder.build(); + final ClusterState state = ClusterState.builder(new ClusterName("test")).version(randomNonNegativeLong()).build(); // connection to reuse @@ -627,14 +624,14 @@ public class UnicastZenPingTests extends ESTestCase { .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) .nodes(DiscoveryNodes.builder().add(handleA.node).add(handleB.node).localNodeId("UZP_A")) .build(); - final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER, () -> stateA); + final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, () -> stateA); zenPingA.start(); closeables.push(zenPingA); final ClusterState stateB = ClusterState.builder(state) .nodes(DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B")) .build(); - TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER, () -> stateB); + TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, () -> stateB); zenPingB.start(); closeables.push(zenPingB); @@ -669,19 +666,20 @@ public class UnicastZenPingTests extends ESTestCase { .put("cluster.name", "test") .put("discovery.zen.ping.unicast.hosts", (String) null) // use nodes for simplicity .build(); + final ClusterState state = ClusterState.builder(new ClusterName("test")).version(randomNonNegativeLong()).build(); final ClusterState stateA = ClusterState.builder(state) .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) .nodes(DiscoveryNodes.builder().add(handleA.node).add(handleB.node).localNodeId("UZP_A")).build(); - final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER, () -> stateA); + final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, 
threadPool, handleA, () -> stateA); zenPingA.start(); closeables.push(zenPingA); // Node B doesn't know about A! final ClusterState stateB = ClusterState.builder(state).nodes( DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B")).build(); - TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER, () -> stateB); + TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, () -> stateB); zenPingB.start(); closeables.push(zenPingB); @@ -728,7 +726,7 @@ public class UnicastZenPingTests extends ESTestCase { new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); closeables.push(transportService); - final List transportAddresses = TestUnicastZenPing.resolveHostsLists( + final List transportAddresses = UnicastZenPing.resolveHostsLists( executorService, logger, Arrays.asList("127.0.0.1:9300:9300", "127.0.0.1:9301"), @@ -828,9 +826,10 @@ public class UnicastZenPingTests extends ESTestCase { private static class TestUnicastZenPing extends UnicastZenPing { TestUnicastZenPing(Settings settings, ThreadPool threadPool, NetworkHandle networkHandle, - UnicastHostsProvider unicastHostsProvider, PingContextProvider contextProvider) { + PingContextProvider contextProvider) { super(Settings.builder().put("node.name", networkHandle.node.getName()).put(settings).build(), - threadPool, networkHandle.transportService, unicastHostsProvider, contextProvider); + threadPool, networkHandle.transportService, + new SettingsBasedHostsProvider(settings, networkHandle.transportService), contextProvider); } volatile CountDownLatch allTasksCompleted; diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java index 9273ab15143..a60a23bcd6d 100644 --- 
a/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java @@ -317,7 +317,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase { } }; ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, service, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()), - masterService, clusterApplier, clusterSettings, Collections::emptyList, ESAllocationTestCase.createAllocationService(), + masterService, clusterApplier, clusterSettings, hostsResolver -> Collections.emptyList(), ESAllocationTestCase.createAllocationService(), Collections.emptyList()); zenDiscovery.start(); return zenDiscovery; diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/MockUncasedHostProvider.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/MockUncasedHostProvider.java index 2e60a3c518d..dc9304637cd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/discovery/MockUncasedHostProvider.java +++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/MockUncasedHostProvider.java @@ -56,7 +56,7 @@ public final class MockUncasedHostProvider implements UnicastHostsProvider, Clos } @Override - public List buildDynamicHosts() { + public List buildDynamicHosts(HostsResolver hostsResolver) { final DiscoveryNode localNode = getNode(); assert localNode != null; synchronized (activeNodesPerCluster) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java index 11f9e38e665..5387a659aa2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java +++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java @@ -45,7 +45,7 @@ import java.util.List; import java.util.Map; import java.util.function.Supplier; -import static 
org.elasticsearch.discovery.zen.UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING; +import static org.elasticsearch.discovery.zen.SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING; /** * A alternative zen discovery which allows using mocks for things like pings, as well as @@ -84,7 +84,7 @@ public class TestZenDiscovery extends ZenDiscovery { final Supplier supplier; if (USE_MOCK_PINGS.get(settings)) { // we have to return something in order for the unicast host provider setting to resolve to something. It will never be used - supplier = () -> () -> { + supplier = () -> hostsResolver -> { throw new UnsupportedOperationException(); }; } else { From c6cbc99f9c90c75e883393dc6b4691033ab73d72 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Fri, 22 Jun 2018 15:13:31 +0100 Subject: [PATCH 3/8] [ML] Add ML filter update API (#31437) This adds an api to allow updating a filter: POST _xpack/ml/filters/{filter_id}/_update The request body may have: - description: setting a new description - add_items: a list of the items to add - remove_items: a list of the items to remove This commit also changes the PUT filter api to error when the filter_id is already used. As now there is an api for updating filters, the put api should only be used to create new ones. Also, updating a filter results into a notification message auditing the change for every job that is using that filter. 
--- .../xpack/core/XPackClientPlugin.java | 2 + .../core/ml/action/UpdateFilterAction.java | 187 ++++++++++++++++++ .../xpack/core/ml/job/config/MlFilter.java | 13 +- .../xpack/core/ml/job/messages/Messages.java | 2 + .../autodetect/state/ModelSnapshot.java | 4 +- .../xpack/core/ml/utils/ExceptionsHelper.java | 4 + .../UpdateFilterActionRequestTests.java | 58 ++++++ .../core/ml/job/config/MlFilterTests.java | 9 +- .../xpack/ml/MachineLearning.java | 5 + .../ml/action/TransportGetFiltersAction.java | 9 +- .../ml/action/TransportPutFilterAction.java | 37 ++-- .../action/TransportUpdateFilterAction.java | 173 ++++++++++++++++ .../xpack/ml/job/JobManager.java | 53 +++-- .../persistence/BatchedBucketsIterator.java | 4 +- .../BatchedInfluencersIterator.java | 4 +- .../persistence/BatchedRecordsIterator.java | 4 +- .../xpack/ml/job/persistence/JobProvider.java | 19 +- .../rest/filter/RestUpdateFilterAction.java | 41 ++++ .../xpack/ml/job/JobManagerTests.java | 90 ++++++++- .../api/xpack.ml.update_filter.json | 20 ++ .../test/ml/custom_all_field.yml | 2 + .../test/ml/delete_model_snapshot.yml | 2 + .../rest-api-spec/test/ml/filter_crud.yml | 68 ++++++- .../test/ml/get_model_snapshots.yml | 3 + .../rest-api-spec/test/ml/index_layout.yml | 2 + .../rest-api-spec/test/ml/jobs_crud.yml | 4 + .../test/ml/jobs_get_result_buckets.yml | 3 + .../test/ml/jobs_get_result_categories.yml | 3 + .../test/ml/jobs_get_result_influencers.yml | 3 + .../ml/jobs_get_result_overall_buckets.yml | 9 + .../test/ml/jobs_get_result_records.yml | 2 + .../rest-api-spec/test/ml/jobs_get_stats.yml | 2 + .../test/ml/ml_anomalies_default_mappings.yml | 1 + .../test/ml/revert_model_snapshot.yml | 9 + .../test/ml/update_model_snapshot.yml | 2 + .../ml/integration/DetectionRulesIT.java | 12 +- .../smoke-test-ml-with-security/build.gradle | 1 + 37 files changed, 794 insertions(+), 72 deletions(-) create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterAction.java 
create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterActionRequestTests.java create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/filter/RestUpdateFilterAction.java create mode 100644 x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.update_filter.json diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index 2894138248b..0bf6601593d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -84,6 +84,7 @@ import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; import org.elasticsearch.xpack.core.ml.action.UpdateCalendarJobAction; import org.elasticsearch.xpack.core.ml.action.UpdateDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.UpdateFilterAction; import org.elasticsearch.xpack.core.ml.action.UpdateJobAction; import org.elasticsearch.xpack.core.ml.action.UpdateModelSnapshotAction; import org.elasticsearch.xpack.core.ml.action.UpdateProcessAction; @@ -220,6 +221,7 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl OpenJobAction.INSTANCE, GetFiltersAction.INSTANCE, PutFilterAction.INSTANCE, + UpdateFilterAction.INSTANCE, DeleteFilterAction.INSTANCE, KillProcessAction.INSTANCE, GetBucketsAction.INSTANCE, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterAction.java new file mode 100644 index 00000000000..57b3d3457d7 --- /dev/null +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterAction.java @@ -0,0 +1,187 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.job.config.MlFilter; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Objects; +import java.util.SortedSet; +import java.util.TreeSet; + + +public class UpdateFilterAction extends Action { + + public static final UpdateFilterAction INSTANCE = new UpdateFilterAction(); + public static final String NAME = "cluster:admin/xpack/ml/filters/update"; + + private UpdateFilterAction() { + super(NAME); + } + + @Override + public PutFilterAction.Response newResponse() { + return new PutFilterAction.Response(); + } + + public static class Request extends ActionRequest implements ToXContentObject 
{ + + public static final ParseField ADD_ITEMS = new ParseField("add_items"); + public static final ParseField REMOVE_ITEMS = new ParseField("remove_items"); + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); + + static { + PARSER.declareString((request, filterId) -> request.filterId = filterId, MlFilter.ID); + PARSER.declareStringOrNull(Request::setDescription, MlFilter.DESCRIPTION); + PARSER.declareStringArray(Request::setAddItems, ADD_ITEMS); + PARSER.declareStringArray(Request::setRemoveItems, REMOVE_ITEMS); + } + + public static Request parseRequest(String filterId, XContentParser parser) { + Request request = PARSER.apply(parser, null); + if (request.filterId == null) { + request.filterId = filterId; + } else if (!Strings.isNullOrEmpty(filterId) && !filterId.equals(request.filterId)) { + // If we have both URI and body filter ID, they must be identical + throw new IllegalArgumentException(Messages.getMessage(Messages.INCONSISTENT_ID, MlFilter.ID.getPreferredName(), + request.filterId, filterId)); + } + return request; + } + + private String filterId; + @Nullable + private String description; + private SortedSet addItems = Collections.emptySortedSet(); + private SortedSet removeItems = Collections.emptySortedSet(); + + public Request() { + } + + public Request(String filterId) { + this.filterId = ExceptionsHelper.requireNonNull(filterId, MlFilter.ID.getPreferredName()); + } + + public String getFilterId() { + return filterId; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public SortedSet getAddItems() { + return addItems; + } + + public void setAddItems(Collection addItems) { + this.addItems = new TreeSet<>(ExceptionsHelper.requireNonNull(addItems, ADD_ITEMS.getPreferredName())); + } + + public SortedSet getRemoveItems() { + return removeItems; + } + + public void setRemoveItems(Collection removeItems) { + 
this.removeItems = new TreeSet<>(ExceptionsHelper.requireNonNull(removeItems, REMOVE_ITEMS.getPreferredName())); + } + + public boolean isNoop() { + return description == null && addItems.isEmpty() && removeItems.isEmpty(); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + filterId = in.readString(); + description = in.readOptionalString(); + addItems = new TreeSet<>(Arrays.asList(in.readStringArray())); + removeItems = new TreeSet<>(Arrays.asList(in.readStringArray())); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(filterId); + out.writeOptionalString(description); + out.writeStringArray(addItems.toArray(new String[addItems.size()])); + out.writeStringArray(removeItems.toArray(new String[removeItems.size()])); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MlFilter.ID.getPreferredName(), filterId); + if (description != null) { + builder.field(MlFilter.DESCRIPTION.getPreferredName(), description); + } + if (addItems.isEmpty() == false) { + builder.field(ADD_ITEMS.getPreferredName(), addItems); + } + if (removeItems.isEmpty() == false) { + builder.field(REMOVE_ITEMS.getPreferredName(), removeItems); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(filterId, description, addItems, removeItems); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(filterId, other.filterId) + && Objects.equals(description, other.description) + && Objects.equals(addItems, other.addItems) + && Objects.equals(removeItems, other.removeItems); + } + } + + 
public static class RequestBuilder extends ActionRequestBuilder { + + public RequestBuilder(ElasticsearchClient client) { + super(client, INSTANCE, new Request()); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java index b11dfd47651..b45ce73f124 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java @@ -56,7 +56,7 @@ public class MlFilter implements ToXContentObject, Writeable { private final String description; private final SortedSet items; - public MlFilter(String id, String description, SortedSet items) { + private MlFilter(String id, String description, SortedSet items) { this.id = Objects.requireNonNull(id, ID.getPreferredName() + " must not be null"); this.description = description; this.items = Objects.requireNonNull(items, ITEMS.getPreferredName() + " must not be null"); @@ -69,8 +69,7 @@ public class MlFilter implements ToXContentObject, Writeable { } else { description = null; } - items = new TreeSet<>(); - items.addAll(Arrays.asList(in.readStringArray())); + items = new TreeSet<>(Arrays.asList(in.readStringArray())); } @Override @@ -163,9 +162,13 @@ public class MlFilter implements ToXContentObject, Writeable { return this; } + public Builder setItems(SortedSet items) { + this.items = items; + return this; + } + public Builder setItems(List items) { - this.items = new TreeSet<>(); - this.items.addAll(items); + this.items = new TreeSet<>(items); return this; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java index 79d8f068d91..f0329051fed 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -42,6 +42,8 @@ public final class Messages { public static final String DATAFEED_FREQUENCY_MUST_BE_MULTIPLE_OF_AGGREGATIONS_INTERVAL = "Datafeed frequency [{0}] must be a multiple of the aggregation interval [{1}]"; + public static final String FILTER_NOT_FOUND = "No filter with id [{0}] exists"; + public static final String INCONSISTENT_ID = "Inconsistent {0}; ''{1}'' specified in the body differs from ''{2}'' specified as a URL argument"; public static final String INVALID_ID = "Invalid {0}; ''{1}'' can contain lowercase alphanumeric (a-z and 0-9), hyphens or " + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java index 1588298918e..03487500d8a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java @@ -19,9 +19,9 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; @@ -345,7 +345,7 @@ public class ModelSnapshot implements ToXContentObject, Writeable { public static 
ModelSnapshot fromJson(BytesReference bytesReference) { try (InputStream stream = bytesReference.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(bytesReference)) + XContentParser parser = XContentFactory.xContent(XContentType.JSON) .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { return LENIENT_PARSER.apply(parser, null).build(); } catch (IOException e) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExceptionsHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExceptionsHelper.java index 150c539b1ae..d5b83d25ce3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExceptionsHelper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExceptionsHelper.java @@ -38,6 +38,10 @@ public class ExceptionsHelper { return new ElasticsearchException(msg, cause); } + public static ElasticsearchStatusException conflictStatusException(String msg, Throwable cause, Object... args) { + return new ElasticsearchStatusException(msg, RestStatus.CONFLICT, cause, args); + } + public static ElasticsearchStatusException conflictStatusException(String msg, Object... args) { return new ElasticsearchStatusException(msg, RestStatus.CONFLICT, args); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterActionRequestTests.java new file mode 100644 index 00000000000..f07eba7e90e --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterActionRequestTests.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.UpdateFilterAction.Request; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +public class UpdateFilterActionRequestTests extends AbstractStreamableXContentTestCase { + + private String filterId = randomAlphaOfLength(20); + + @Override + protected Request createTestInstance() { + UpdateFilterAction.Request request = new UpdateFilterAction.Request(filterId); + if (randomBoolean()) { + request.setDescription(randomAlphaOfLength(20)); + } + if (randomBoolean()) { + request.setAddItems(generateRandomStrings()); + } + if (randomBoolean()) { + request.setRemoveItems(generateRandomStrings()); + } + return request; + } + + private static Collection generateRandomStrings() { + int size = randomIntBetween(0, 10); + List strings = new ArrayList<>(size); + for (int i = 0; i < size; ++i) { + strings.add(randomAlphaOfLength(20)); + } + return strings; + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + @Override + protected Request doParseInstance(XContentParser parser) { + return Request.parseRequest(filterId, parser); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java index 9ac6683f004..c8d8527dc01 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java @@ -11,6 +11,7 @@ import 
org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; +import java.util.SortedSet; import java.util.TreeSet; import static org.hamcrest.Matchers.contains; @@ -43,7 +44,7 @@ public class MlFilterTests extends AbstractSerializingTestCase { for (int i = 0; i < size; i++) { items.add(randomAlphaOfLengthBetween(1, 20)); } - return new MlFilter(filterId, description, items); + return MlFilter.builder(filterId).setDescription(description).setItems(items).build(); } @Override @@ -57,13 +58,13 @@ public class MlFilterTests extends AbstractSerializingTestCase { } public void testNullId() { - NullPointerException ex = expectThrows(NullPointerException.class, () -> new MlFilter(null, "", new TreeSet<>())); + NullPointerException ex = expectThrows(NullPointerException.class, () -> MlFilter.builder(null).build()); assertEquals(MlFilter.ID.getPreferredName() + " must not be null", ex.getMessage()); } public void testNullItems() { - NullPointerException ex = - expectThrows(NullPointerException.class, () -> new MlFilter(randomAlphaOfLengthBetween(1, 20), "", null)); + NullPointerException ex = expectThrows(NullPointerException.class, + () -> MlFilter.builder(randomAlphaOfLength(20)).setItems((SortedSet) null).build()); assertEquals(MlFilter.ITEMS.getPreferredName() + " must not be null", ex.getMessage()); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index a1714a8e3f5..3d1011c47e2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -97,6 +97,7 @@ import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; import 
org.elasticsearch.xpack.core.ml.action.UpdateCalendarJobAction; import org.elasticsearch.xpack.core.ml.action.UpdateDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.UpdateFilterAction; import org.elasticsearch.xpack.core.ml.action.UpdateJobAction; import org.elasticsearch.xpack.core.ml.action.UpdateModelSnapshotAction; import org.elasticsearch.xpack.core.ml.action.UpdateProcessAction; @@ -148,6 +149,7 @@ import org.elasticsearch.xpack.ml.action.TransportStartDatafeedAction; import org.elasticsearch.xpack.ml.action.TransportStopDatafeedAction; import org.elasticsearch.xpack.ml.action.TransportUpdateCalendarJobAction; import org.elasticsearch.xpack.ml.action.TransportUpdateDatafeedAction; +import org.elasticsearch.xpack.ml.action.TransportUpdateFilterAction; import org.elasticsearch.xpack.ml.action.TransportUpdateJobAction; import org.elasticsearch.xpack.ml.action.TransportUpdateModelSnapshotAction; import org.elasticsearch.xpack.ml.action.TransportUpdateProcessAction; @@ -196,6 +198,7 @@ import org.elasticsearch.xpack.ml.rest.datafeeds.RestUpdateDatafeedAction; import org.elasticsearch.xpack.ml.rest.filter.RestDeleteFilterAction; import org.elasticsearch.xpack.ml.rest.filter.RestGetFiltersAction; import org.elasticsearch.xpack.ml.rest.filter.RestPutFilterAction; +import org.elasticsearch.xpack.ml.rest.filter.RestUpdateFilterAction; import org.elasticsearch.xpack.ml.rest.job.RestCloseJobAction; import org.elasticsearch.xpack.ml.rest.job.RestDeleteJobAction; import org.elasticsearch.xpack.ml.rest.job.RestFlushJobAction; @@ -460,6 +463,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu new RestOpenJobAction(settings, restController), new RestGetFiltersAction(settings, restController), new RestPutFilterAction(settings, restController), + new RestUpdateFilterAction(settings, restController), new RestDeleteFilterAction(settings, restController), new RestGetInfluencersAction(settings, restController), new 
RestGetRecordsAction(settings, restController), @@ -511,6 +515,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu new ActionHandler<>(OpenJobAction.INSTANCE, TransportOpenJobAction.class), new ActionHandler<>(GetFiltersAction.INSTANCE, TransportGetFiltersAction.class), new ActionHandler<>(PutFilterAction.INSTANCE, TransportPutFilterAction.class), + new ActionHandler<>(UpdateFilterAction.INSTANCE, TransportUpdateFilterAction.class), new ActionHandler<>(DeleteFilterAction.INSTANCE, TransportDeleteFilterAction.class), new ActionHandler<>(KillProcessAction.INSTANCE, TransportKillProcessAction.class), new ActionHandler<>(GetBucketsAction.INSTANCE, TransportGetBucketsAction.class), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetFiltersAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetFiltersAction.java index 1be7be4a5d2..c8cd7a0d63b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetFiltersAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetFiltersAction.java @@ -21,8 +21,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -80,9 +80,8 @@ public class TransportGetFiltersAction extends HandledTransportAction(Collections.singletonList(filter), 1, MlFilter.RESULTS_FIELD); @@ -122,7 +121,7 @@ public class TransportGetFiltersAction extends HandledTransportAction { 
private final Client client; - private final JobManager jobManager; @Inject - public TransportPutFilterAction(Settings settings, TransportService transportService, ActionFilters actionFilters, - Client client, JobManager jobManager) { + public TransportPutFilterAction(Settings settings, TransportService transportService, ActionFilters actionFilters, Client client) { super(settings, PutFilterAction.NAME, transportService, actionFilters, - (Supplier) PutFilterAction.Request::new); + (Supplier) PutFilterAction.Request::new); this.client = client; - this.jobManager = jobManager; } @Override protected void doExecute(PutFilterAction.Request request, ActionListener listener) { MlFilter filter = request.getFilter(); IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, filter.documentId()); + indexRequest.opType(DocWriteRequest.OpType.CREATE); + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); try (XContentBuilder builder = XContentFactory.jsonBuilder()) { ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(MlMetaIndex.INCLUDE_TYPE_KEY, "true")); indexRequest.source(filter.toXContent(builder, params)); } catch (IOException e) { throw new IllegalStateException("Failed to serialise filter with id [" + filter.getId() + "]", e); } - BulkRequestBuilder bulkRequestBuilder = client.prepareBulk(); - bulkRequestBuilder.add(indexRequest); - bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - executeAsyncWithOrigin(client, ML_ORIGIN, BulkAction.INSTANCE, bulkRequestBuilder.request(), - new ActionListener() { + executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, indexRequest, + new ActionListener() { @Override - public void onResponse(BulkResponse indexResponse) { - jobManager.updateProcessOnFilterChanged(filter); + public void onResponse(IndexResponse indexResponse) { listener.onResponse(new PutFilterAction.Response(filter)); } @Override public void 
onFailure(Exception e) { - listener.onFailure(ExceptionsHelper.serverError("Error putting filter with id [" + filter.getId() + "]", e)); + Exception reportedException; + if (e instanceof VersionConflictEngineException) { + reportedException = new ResourceAlreadyExistsException("A filter with id [" + filter.getId() + + "] already exists"); + } else { + reportedException = ExceptionsHelper.serverError("Error putting filter with id [" + filter.getId() + "]", e); + } + listener.onFailure(reportedException); } }); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java new file mode 100644 index 00000000000..37f550fbb02 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java @@ -0,0 +1,173 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.action; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.get.GetAction; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ml.MlMetaIndex; +import org.elasticsearch.xpack.core.ml.action.PutFilterAction; +import org.elasticsearch.xpack.core.ml.action.UpdateFilterAction; +import org.elasticsearch.xpack.core.ml.job.config.MlFilter; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.ml.job.JobManager; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Collections; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.function.Supplier; + +import static 
org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; + +public class TransportUpdateFilterAction extends HandledTransportAction { + + private final Client client; + private final JobManager jobManager; + + @Inject + public TransportUpdateFilterAction(Settings settings, TransportService transportService, ActionFilters actionFilters, Client client, + JobManager jobManager) { + super(settings, UpdateFilterAction.NAME, transportService, actionFilters, + (Supplier) UpdateFilterAction.Request::new); + this.client = client; + this.jobManager = jobManager; + } + + @Override + protected void doExecute(UpdateFilterAction.Request request, ActionListener listener) { + ActionListener filterListener = ActionListener.wrap(filterWithVersion -> { + updateFilter(filterWithVersion, request, listener); + }, listener::onFailure); + + getFilterWithVersion(request.getFilterId(), filterListener); + } + + private void updateFilter(FilterWithVersion filterWithVersion, UpdateFilterAction.Request request, + ActionListener listener) { + MlFilter filter = filterWithVersion.filter; + + if (request.isNoop()) { + listener.onResponse(new PutFilterAction.Response(filter)); + return; + } + + String description = request.getDescription() == null ? 
filter.getDescription() : request.getDescription(); + SortedSet items = new TreeSet<>(filter.getItems()); + items.addAll(request.getAddItems()); + + // Check if removed items are present to avoid typos + for (String toRemove : request.getRemoveItems()) { + boolean wasPresent = items.remove(toRemove); + if (wasPresent == false) { + listener.onFailure(ExceptionsHelper.badRequestException("Cannot remove item [" + toRemove + + "] as it is not present in filter [" + filter.getId() + "]")); + return; + } + } + + MlFilter updatedFilter = MlFilter.builder(filter.getId()).setDescription(description).setItems(items).build(); + indexUpdatedFilter(updatedFilter, filterWithVersion.version, request, listener); + } + + private void indexUpdatedFilter(MlFilter filter, long version, UpdateFilterAction.Request request, + ActionListener listener) { + IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, filter.documentId()); + indexRequest.version(version); + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(MlMetaIndex.INCLUDE_TYPE_KEY, "true")); + indexRequest.source(filter.toXContent(builder, params)); + } catch (IOException e) { + throw new IllegalStateException("Failed to serialise filter with id [" + filter.getId() + "]", e); + } + + executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, indexRequest, new ActionListener() { + @Override + public void onResponse(IndexResponse indexResponse) { + jobManager.notifyFilterChanged(filter, request.getAddItems(), request.getRemoveItems()); + listener.onResponse(new PutFilterAction.Response(filter)); + } + + @Override + public void onFailure(Exception e) { + Exception reportedException; + if (e instanceof VersionConflictEngineException) { + reportedException = ExceptionsHelper.conflictStatusException("Error updating filter with id 
[" + filter.getId() + + "] because it was modified while the update was in progress", e); + } else { + reportedException = ExceptionsHelper.serverError("Error updating filter with id [" + filter.getId() + "]", e); + } + listener.onFailure(reportedException); + } + }); + } + + private void getFilterWithVersion(String filterId, ActionListener listener) { + GetRequest getRequest = new GetRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, MlFilter.documentId(filterId)); + executeAsyncWithOrigin(client, ML_ORIGIN, GetAction.INSTANCE, getRequest, new ActionListener() { + @Override + public void onResponse(GetResponse getDocResponse) { + try { + if (getDocResponse.isExists()) { + BytesReference docSource = getDocResponse.getSourceAsBytesRef(); + try (InputStream stream = docSource.streamInput(); + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + MlFilter filter = MlFilter.LENIENT_PARSER.apply(parser, null).build(); + listener.onResponse(new FilterWithVersion(filter, getDocResponse.getVersion())); + } + } else { + this.onFailure(new ResourceNotFoundException(Messages.getMessage(Messages.FILTER_NOT_FOUND, filterId))); + } + } catch (Exception e) { + this.onFailure(e); + } + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } + + private static class FilterWithVersion { + + private final MlFilter filter; + private final long version; + + private FilterWithVersion(MlFilter filter, long version) { + this.filter = filter; + this.version = version; + } + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java index fe6deea55e3..c3d31ae10e9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java @@ -403,26 +403,55 @@ public class JobManager extends AbstractComponent { return buildNewClusterState(currentState, builder); } - public void updateProcessOnFilterChanged(MlFilter filter) { + public void notifyFilterChanged(MlFilter filter, Set addedItems, Set removedItems) { + if (addedItems.isEmpty() && removedItems.isEmpty()) { + return; + } + ClusterState clusterState = clusterService.state(); QueryPage jobs = expandJobs("*", true, clusterService.state()); for (Job job : jobs.results()) { - if (isJobOpen(clusterState, job.getId())) { - Set jobFilters = job.getAnalysisConfig().extractReferencedFilters(); - if (jobFilters.contains(filter.getId())) { - updateJobProcessNotifier.submitJobUpdate(UpdateParams.filterUpdate(job.getId(), filter), ActionListener.wrap( - isUpdated -> { - if (isUpdated) { - auditor.info(job.getId(), - Messages.getMessage(Messages.JOB_AUDIT_FILTER_UPDATED_ON_PROCESS, filter.getId())); - } - }, e -> {} - )); + Set jobFilters = job.getAnalysisConfig().extractReferencedFilters(); + if (jobFilters.contains(filter.getId())) { + if (isJobOpen(clusterState, job.getId())) { + updateJobProcessNotifier.submitJobUpdate(UpdateParams.filterUpdate(job.getId(), filter), + ActionListener.wrap(isUpdated -> { + auditFilterChanges(job.getId(), filter.getId(), addedItems, removedItems); + }, e -> {})); + } else { + auditFilterChanges(job.getId(), filter.getId(), addedItems, removedItems); } } } } + private void auditFilterChanges(String jobId, String filterId, Set addedItems, Set removedItems) { + StringBuilder auditMsg = new StringBuilder("Filter ["); + auditMsg.append(filterId); + auditMsg.append("] has been modified; "); + + if (addedItems.isEmpty() == false) { + auditMsg.append("added items: "); + appendCommaSeparatedSet(addedItems, auditMsg); + if (removedItems.isEmpty() == false) { + auditMsg.append(", "); + } + } + + if (removedItems.isEmpty() == false) { + 
auditMsg.append("removed items: "); + appendCommaSeparatedSet(removedItems, auditMsg); + } + + auditor.info(jobId, auditMsg.toString()); + } + + private static void appendCommaSeparatedSet(Set items, StringBuilder sb) { + sb.append("["); + Strings.collectionToDelimitedString(items, ", ", "'", "'", sb); + sb.append("]"); + } + public void updateProcessOnCalendarChanged(List calendarJobIds) { ClusterState clusterState = clusterService.state(); MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedBucketsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedBucketsIterator.java index 17b4b8edadf..53526e2a475 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedBucketsIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedBucketsIterator.java @@ -11,8 +11,8 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.search.SearchHit; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.elasticsearch.xpack.core.ml.job.results.Result; @@ -30,7 +30,7 @@ class BatchedBucketsIterator extends BatchedResultsIterator { protected Result map(SearchHit hit) { BytesReference source = hit.getSourceRef(); try (InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(source)).createParser(NamedXContentRegistry.EMPTY, + XContentParser parser = 
XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { Bucket bucket = Bucket.LENIENT_PARSER.apply(parser, null); return new Result<>(hit.getIndex(), bucket); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedInfluencersIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedInfluencersIterator.java index d084325350f..fe8bd3aaa3a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedInfluencersIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedInfluencersIterator.java @@ -11,8 +11,8 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.search.SearchHit; import org.elasticsearch.xpack.core.ml.job.results.Influencer; import org.elasticsearch.xpack.core.ml.job.results.Result; @@ -29,7 +29,7 @@ class BatchedInfluencersIterator extends BatchedResultsIterator { protected Result map(SearchHit hit) { BytesReference source = hit.getSourceRef(); try (InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(source)).createParser(NamedXContentRegistry.EMPTY, + XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { Influencer influencer = Influencer.LENIENT_PARSER.apply(parser, null); return new Result<>(hit.getIndex(), influencer); diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedRecordsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedRecordsIterator.java index c0940dfd5aa..22c107f771b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedRecordsIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedRecordsIterator.java @@ -11,8 +11,8 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.search.SearchHit; import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; import org.elasticsearch.xpack.core.ml.job.results.Result; @@ -30,7 +30,7 @@ class BatchedRecordsIterator extends BatchedResultsIterator { protected Result map(SearchHit hit) { BytesReference source = hit.getSourceRef(); try (InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(source)).createParser(NamedXContentRegistry.EMPTY, + XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)){ AnomalyRecord record = AnomalyRecord.LENIENT_PARSER.apply(parser, null); return new Result<>(hit.getIndex(), record); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobProvider.java index 9db1877df18..578ddd1efc7 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobProvider.java @@ -50,7 +50,6 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexNotFoundException; @@ -477,7 +476,7 @@ public class JobProvider { Consumer errorHandler) { BytesReference source = hit.getSourceRef(); try (InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(source)) + XContentParser parser = XContentFactory.xContent(XContentType.JSON) .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { return objectParser.apply(parser, null); } catch (IOException e) { @@ -528,7 +527,7 @@ public class JobProvider { for (SearchHit hit : hits.getHits()) { BytesReference source = hit.getSourceRef(); try (InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(source)) + XContentParser parser = XContentFactory.xContent(XContentType.JSON) .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { Bucket bucket = Bucket.LENIENT_PARSER.apply(parser, null); results.add(bucket); @@ -661,7 +660,7 @@ public class JobProvider { for (SearchHit hit : hits) { BytesReference source = hit.getSourceRef(); try (InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(source)) + XContentParser parser = XContentFactory.xContent(XContentType.JSON) 
.createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { CategoryDefinition categoryDefinition = CategoryDefinition.LENIENT_PARSER.apply(parser, null); if (augment) { @@ -710,7 +709,7 @@ public class JobProvider { for (SearchHit hit : searchResponse.getHits().getHits()) { BytesReference source = hit.getSourceRef(); try (InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(source)) + XContentParser parser = XContentFactory.xContent(XContentType.JSON) .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { results.add(AnomalyRecord.LENIENT_PARSER.apply(parser, null)); } catch (IOException e) { @@ -759,7 +758,7 @@ public class JobProvider { for (SearchHit hit : response.getHits().getHits()) { BytesReference source = hit.getSourceRef(); try (InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(source)) + XContentParser parser = XContentFactory.xContent(XContentType.JSON) .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { influencers.add(Influencer.LENIENT_PARSER.apply(parser, null)); } catch (IOException e) { @@ -904,7 +903,7 @@ public class JobProvider { for (SearchHit hit : searchResponse.getHits().getHits()) { BytesReference source = hit.getSourceRef(); try (InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(source)) + XContentParser parser = XContentFactory.xContent(XContentType.JSON) .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { ModelPlot modelPlot = ModelPlot.LENIENT_PARSER.apply(parser, null); results.add(modelPlot); @@ -1232,10 +1231,8 @@ public class JobProvider { BytesReference docSource = getDocResponse.getSourceAsBytesRef(); try (InputStream stream = docSource.streamInput(); - XContentParser parser = - 
XContentFactory.xContent(XContentHelper.xContentType(docSource)) - .createParser(NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, stream)) { + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { Calendar calendar = Calendar.LENIENT_PARSER.apply(parser, null).build(); listener.onResponse(calendar); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/filter/RestUpdateFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/filter/RestUpdateFilterAction.java new file mode 100644 index 00000000000..80acf3d7e4e --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/filter/RestUpdateFilterAction.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.rest.filter; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.ml.action.UpdateFilterAction; +import org.elasticsearch.xpack.core.ml.job.config.MlFilter; +import org.elasticsearch.xpack.ml.MachineLearning; + +import java.io.IOException; + +public class RestUpdateFilterAction extends BaseRestHandler { + + public RestUpdateFilterAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.POST, + MachineLearning.BASE_PATH + "filters/{" + MlFilter.ID.getPreferredName() + "}/_update", this); + } + + @Override + public String getName() { + return "xpack_ml_update_filter_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String filterId = restRequest.param(MlFilter.ID.getPreferredName()); + XContentParser parser = restRequest.contentOrSourceParamParser(); + UpdateFilterAction.Request putFilterRequest = UpdateFilterAction.Request.parseRequest(filterId, parser); + return channel -> client.execute(UpdateFilterAction.INSTANCE, putFilterRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java index 42b0a56f49a..cf925963c19 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java @@ -41,12 +41,14 @@ import 
org.elasticsearch.xpack.ml.notifications.Auditor; import org.junit.Before; import org.mockito.ArgumentCaptor; import org.mockito.Matchers; +import org.mockito.Mockito; import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.Date; import java.util.List; +import java.util.TreeSet; import static org.elasticsearch.xpack.core.ml.job.config.JobTests.buildJobBuilder; import static org.elasticsearch.xpack.ml.action.TransportOpenJobActionTests.addJobTask; @@ -174,7 +176,16 @@ public class JobManagerTests extends ESTestCase { }); } - public void testUpdateProcessOnFilterChanged() { + public void testNotifyFilterChangedGivenNoop() { + MlFilter filter = MlFilter.builder("my_filter").build(); + JobManager jobManager = createJobManager(); + + jobManager.notifyFilterChanged(filter, Collections.emptySet(), Collections.emptySet()); + + Mockito.verifyNoMoreInteractions(auditor, updateJobProcessNotifier); + } + + public void testNotifyFilterChanged() { Detector.Builder detectorReferencingFilter = new Detector.Builder("count", null); detectorReferencingFilter.setByFieldName("foo"); DetectionRule filterRule = new DetectionRule.Builder(RuleScope.builder().exclude("foo", "foo_filter")).build(); @@ -208,11 +219,18 @@ public class JobManagerTests extends ESTestCase { .build(); when(clusterService.state()).thenReturn(clusterState); + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onResponse(true); + return null; + }).when(updateJobProcessNotifier).submitJobUpdate(any(), any()); + JobManager jobManager = createJobManager(); MlFilter filter = MlFilter.builder("foo_filter").setItems("a", "b").build(); - jobManager.updateProcessOnFilterChanged(filter); + jobManager.notifyFilterChanged(filter, new TreeSet<>(Arrays.asList("item 1", "item 2")), + new TreeSet<>(Collections.singletonList("item 3"))); ArgumentCaptor updateParamsCaptor = ArgumentCaptor.forClass(UpdateParams.class); 
verify(updateJobProcessNotifier, times(2)).submitJobUpdate(updateParamsCaptor.capture(), any(ActionListener.class)); @@ -223,6 +241,74 @@ public class JobManagerTests extends ESTestCase { assertThat(capturedUpdateParams.get(0).getFilter(), equalTo(filter)); assertThat(capturedUpdateParams.get(1).getJobId(), equalTo(jobReferencingFilter2.getId())); assertThat(capturedUpdateParams.get(1).getFilter(), equalTo(filter)); + + verify(auditor).info(jobReferencingFilter1.getId(), "Filter [foo_filter] has been modified; added items: " + + "['item 1', 'item 2'], removed items: ['item 3']"); + verify(auditor).info(jobReferencingFilter2.getId(), "Filter [foo_filter] has been modified; added items: " + + "['item 1', 'item 2'], removed items: ['item 3']"); + verify(auditor).info(jobReferencingFilter3.getId(), "Filter [foo_filter] has been modified; added items: " + + "['item 1', 'item 2'], removed items: ['item 3']"); + Mockito.verifyNoMoreInteractions(auditor, updateJobProcessNotifier); + } + + public void testNotifyFilterChangedGivenOnlyAddedItems() { + Detector.Builder detectorReferencingFilter = new Detector.Builder("count", null); + detectorReferencingFilter.setByFieldName("foo"); + DetectionRule filterRule = new DetectionRule.Builder(RuleScope.builder().exclude("foo", "foo_filter")).build(); + detectorReferencingFilter.setRules(Collections.singletonList(filterRule)); + AnalysisConfig.Builder filterAnalysisConfig = new AnalysisConfig.Builder(Collections.singletonList( + detectorReferencingFilter.build())); + + Job.Builder jobReferencingFilter = buildJobBuilder("job-referencing-filter"); + jobReferencingFilter.setAnalysisConfig(filterAnalysisConfig); + + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + mlMetadata.putJob(jobReferencingFilter.build(), false); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MLMetadataField.TYPE, mlMetadata.build())) + .build(); + 
when(clusterService.state()).thenReturn(clusterState); + + JobManager jobManager = createJobManager(); + + MlFilter filter = MlFilter.builder("foo_filter").build(); + + jobManager.notifyFilterChanged(filter, new TreeSet<>(Arrays.asList("a", "b")), Collections.emptySet()); + + verify(auditor).info(jobReferencingFilter.getId(), "Filter [foo_filter] has been modified; added items: ['a', 'b']"); + Mockito.verifyNoMoreInteractions(auditor, updateJobProcessNotifier); + } + + public void testNotifyFilterChangedGivenOnlyRemovedItems() { + Detector.Builder detectorReferencingFilter = new Detector.Builder("count", null); + detectorReferencingFilter.setByFieldName("foo"); + DetectionRule filterRule = new DetectionRule.Builder(RuleScope.builder().exclude("foo", "foo_filter")).build(); + detectorReferencingFilter.setRules(Collections.singletonList(filterRule)); + AnalysisConfig.Builder filterAnalysisConfig = new AnalysisConfig.Builder(Collections.singletonList( + detectorReferencingFilter.build())); + + Job.Builder jobReferencingFilter = buildJobBuilder("job-referencing-filter"); + jobReferencingFilter.setAnalysisConfig(filterAnalysisConfig); + + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + mlMetadata.putJob(jobReferencingFilter.build(), false); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MLMetadataField.TYPE, mlMetadata.build())) + .build(); + when(clusterService.state()).thenReturn(clusterState); + + JobManager jobManager = createJobManager(); + + MlFilter filter = MlFilter.builder("foo_filter").build(); + + jobManager.notifyFilterChanged(filter, Collections.emptySet(), new TreeSet<>(Arrays.asList("a", "b"))); + + verify(auditor).info(jobReferencingFilter.getId(), "Filter [foo_filter] has been modified; removed items: ['a', 'b']"); + Mockito.verifyNoMoreInteractions(auditor, updateJobProcessNotifier); } public void testUpdateProcessOnCalendarChanged() { diff --git 
a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.update_filter.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.update_filter.json new file mode 100644 index 00000000000..06aceea4c12 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.update_filter.json @@ -0,0 +1,20 @@ +{ + "xpack.ml.update_filter": { + "methods": [ "POST" ], + "url": { + "path": "/_xpack/ml/filters/{filter_id}/_update", + "paths": [ "/_xpack/ml/filters/{filter_id}/_update" ], + "parts": { + "filter_id": { + "type": "string", + "required": true, + "description": "The ID of the filter to update" + } + } + }, + "body": { + "description" : "The filter update", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/custom_all_field.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/custom_all_field.yml index ffbbf4d95bd..c206a08e6ca 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/custom_all_field.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/custom_all_field.yml @@ -30,6 +30,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-custom-all-test-1 type: doc @@ -56,6 +57,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-custom-all-test-2 type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_model_snapshot.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_model_snapshot.yml index 1a587c47fd5..c13b2473cc7 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_model_snapshot.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_model_snapshot.yml @@ -34,6 +34,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-delete-model-snapshot type: doc @@ -76,6 +77,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-delete-model-snapshot type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml index 4c184d34c99..d787e07b8c2 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml @@ -4,6 +4,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json index: index: .ml-meta type: doc @@ -112,25 +113,25 @@ setup: "Test create filter api": - do: xpack.ml.put_filter: - filter_id: filter-foo2 + filter_id: new-filter body: > { "description": "A newly created filter", "items": ["abc", "xyz"] } - - match: { filter_id: filter-foo2 } + - match: { filter_id: new-filter } - match: { description: "A newly created filter" } - match: { items: ["abc", "xyz"]} - do: xpack.ml.get_filters: - filter_id: "filter-foo2" + filter_id: "new-filter" - match: { count: 1 } - match: filters.0: - filter_id: "filter-foo2" + filter_id: "new-filter" description: "A newly created filter" items: ["abc", "xyz"] @@ -146,6 +147,65 @@ setup: "items": ["abc", "xyz"] } +--- +"Test update filter given no filter matches filter_id": + - do: + catch: missing + xpack.ml.update_filter: + filter_id: "missing_filter" + body: > + { + } + +--- +"Test update filter": + - do: + xpack.ml.put_filter: + filter_id: "test_update_filter" + body: > + { + "description": "old description", + "items": ["a", "b"] + } + - match: { filter_id: test_update_filter } + + - do: + xpack.ml.update_filter: + filter_id: "test_update_filter" + body: > + { + "description": "new description", + "add_items": ["c", "d"], + "remove_items": ["a"] + } + - match: { filter_id: test_update_filter } + - match: { description: "new description" } + - match: { items: ["b", "c", "d"] } + + - do: + xpack.ml.get_filters: + filter_id: "test_update_filter" + - match: + filters.0: + filter_id: "test_update_filter" + description: "new description" + items: ["b", "c", "d"] + + - do: + xpack.ml.delete_filter: + filter_id: test_update_filter + +--- +"Test update filter given remove item is not present": + - do: + catch: /Cannot remove item \[not present item\] as it is not present in filter \[filter-foo\]/ + xpack.ml.update_filter: + filter_id: "filter-foo" + body: > + { + "remove_items": ["not present item"] + } + --- "Test delete in-use filter": - do: 
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_model_snapshots.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_model_snapshots.yml index 57cc80ae2fb..e411251363b 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_model_snapshots.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_model_snapshots.yml @@ -18,6 +18,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-get-model-snapshots type: doc @@ -33,6 +34,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-state type: doc @@ -44,6 +46,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-get-model-snapshots type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml index c13ae86e06f..6a60bbb96da 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml @@ -556,6 +556,8 @@ - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json + index: index: .ml-anomalies-shared type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml index df505176ae7..3b08753e209 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml @@ -419,6 +419,8 @@ - match: { job_id: "jobs-crud-model-memory-limit-decrease" } - do: + headers: + Content-Type: application/json index: index: .ml-anomalies-shared type: doc @@ -929,6 +931,8 @@ "Test cannot create job with existing result document": - do: + headers: + Content-Type: application/json index: index: .ml-anomalies-shared type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_buckets.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_buckets.yml index 2a7a7970e5d..125f8cbf7f8 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_buckets.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_buckets.yml @@ -18,6 +18,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-buckets type: doc @@ -34,6 +35,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-buckets type: doc @@ -50,6 +52,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-buckets type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_categories.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_categories.yml index 565f1612f89..307a1d0a80d 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_categories.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_categories.yml @@ -18,6 +18,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-categories type: doc @@ -26,6 +27,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-categories type: doc @@ -34,6 +36,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-unrelated type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_influencers.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_influencers.yml index 50f0cfc6816..9b875fb1afd 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_influencers.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_influencers.yml @@ -18,6 +18,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-get-influencers-test type: doc @@ -36,6 +37,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-get-influencers-test type: doc @@ -55,6 +57,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-get-influencers-test type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_overall_buckets.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_overall_buckets.yml index 75f35f31117..249ff7c72d7 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_overall_buckets.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_overall_buckets.yml @@ -59,6 +59,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc @@ -75,6 +76,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc @@ -91,6 +93,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc @@ -123,6 +126,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc @@ -139,6 +143,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc @@ -155,6 +160,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc @@ -171,6 +177,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc @@ -187,6 +194,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc @@ -203,6 +211,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_records.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_records.yml index b5dae2045f4..513e1fb8757 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_records.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_records.yml @@ -18,6 +18,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-records type: doc @@ -34,6 +35,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-records type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml index 61bcf63e398..b841c8c2306 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml @@ -226,6 +226,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc @@ -250,6 +251,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml index 42fca7b81a0..0f016132037 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml @@ -19,6 +19,7 @@ - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/revert_model_snapshot.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/revert_model_snapshot.yml index a66c0da12d0..ce638fdceaa 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/revert_model_snapshot.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/revert_model_snapshot.yml @@ -34,6 +34,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot type: doc @@ -61,6 +62,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot type: doc @@ -88,6 +90,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot type: doc @@ -103,6 +106,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot type: doc @@ -118,6 +122,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot type: doc @@ -133,6 +138,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot type: doc @@ -148,6 +154,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot type: doc @@ -163,6 +170,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot type: doc @@ -180,6 +188,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/update_model_snapshot.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/update_model_snapshot.yml index 6a1d6e117e9..9966ae668c0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/update_model_snapshot.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/update_model_snapshot.yml @@ -18,6 +18,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-update-model-snapshot type: doc @@ -67,6 +68,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-update-model-snapshot type: doc diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java index fbda8ad716b..7f018f967fb 100644 --- a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java @@ -11,6 +11,7 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.xpack.core.ml.action.GetRecordsAction; +import org.elasticsearch.xpack.core.ml.action.UpdateFilterAction; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.DetectionRule; @@ -34,6 +35,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.isOneOf; @@ -177,10 +179,12 @@ public class DetectionRulesIT extends MlNativeAutodetectIntegTestCase { assertThat(records.get(0).getOverFieldValue(), equalTo("333.333.333.333")); // Now let's update the filter - MlFilter updatedFilter = MlFilter.builder(safeIps.getId()).setItems("333.333.333.333").build(); - assertThat(putMlFilter(updatedFilter).getFilter(), equalTo(updatedFilter)); + UpdateFilterAction.Request updateFilterRequest = new UpdateFilterAction.Request(safeIps.getId()); + updateFilterRequest.setRemoveItems(safeIps.getItems()); + updateFilterRequest.setAddItems(Collections.singletonList("333.333.333.333")); + client().execute(UpdateFilterAction.INSTANCE, updateFilterRequest).get(); - 
// Wait until the notification that the process was updated is indexed + // Wait until the notification that the filter was updated is indexed assertBusy(() -> { SearchResponse searchResponse = client().prepareSearch(".ml-notifications") .setSize(1) @@ -191,7 +195,7 @@ public class DetectionRulesIT extends MlNativeAutodetectIntegTestCase { ).get(); SearchHit[] hits = searchResponse.getHits().getHits(); assertThat(hits.length, equalTo(1)); - assertThat(hits[0].getSourceAsMap().get("message"), equalTo("Updated filter [safe_ips] in running process")); + assertThat((String) hits[0].getSourceAsMap().get("message"), containsString("Filter [safe_ips] has been modified")); }); long secondAnomalyTime = timestamp; diff --git a/x-pack/qa/smoke-test-ml-with-security/build.gradle b/x-pack/qa/smoke-test-ml-with-security/build.gradle index ebe55c2b7ef..58e5eca3600 100644 --- a/x-pack/qa/smoke-test-ml-with-security/build.gradle +++ b/x-pack/qa/smoke-test-ml-with-security/build.gradle @@ -42,6 +42,7 @@ integTestRunner { 'ml/filter_crud/Test get filter API with bad ID', 'ml/filter_crud/Test invalid param combinations', 'ml/filter_crud/Test non-existing filter', + 'ml/filter_crud/Test update filter given remove item is not present', 'ml/get_datafeed_stats/Test get datafeed stats given missing datafeed_id', 'ml/get_datafeeds/Test get datafeed given missing datafeed_id', 'ml/jobs_crud/Test cannot create job with existing categorizer state document', From f023e95ae0ce4828085631b342d518c0e572a8aa Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Fri, 22 Jun 2018 16:17:17 +0200 Subject: [PATCH 4/8] Upgrade to Lucene 7.4.0. (#31529) This moves Elasticsearch from a recent 7.4.0 snapshot to the GA release. 
--- buildSrc/version.properties | 2 +- .../lucene-expressions-7.4.0-snapshot-518d303506.jar.sha1 | 1 - .../lang-expression/licenses/lucene-expressions-7.4.0.jar.sha1 | 1 + .../lucene-analyzers-icu-7.4.0-snapshot-518d303506.jar.sha1 | 1 - .../analysis-icu/licenses/lucene-analyzers-icu-7.4.0.jar.sha1 | 1 + ...lucene-analyzers-kuromoji-7.4.0-snapshot-518d303506.jar.sha1 | 1 - .../licenses/lucene-analyzers-kuromoji-7.4.0.jar.sha1 | 1 + .../lucene-analyzers-nori-7.4.0-snapshot-518d303506.jar.sha1 | 1 - .../analysis-nori/licenses/lucene-analyzers-nori-7.4.0.jar.sha1 | 1 + ...lucene-analyzers-phonetic-7.4.0-snapshot-518d303506.jar.sha1 | 1 - .../licenses/lucene-analyzers-phonetic-7.4.0.jar.sha1 | 1 + .../lucene-analyzers-smartcn-7.4.0-snapshot-518d303506.jar.sha1 | 1 - .../licenses/lucene-analyzers-smartcn-7.4.0.jar.sha1 | 1 + .../lucene-analyzers-stempel-7.4.0-snapshot-518d303506.jar.sha1 | 1 - .../licenses/lucene-analyzers-stempel-7.4.0.jar.sha1 | 1 + ...cene-analyzers-morfologik-7.4.0-snapshot-518d303506.jar.sha1 | 1 - .../licenses/lucene-analyzers-morfologik-7.4.0.jar.sha1 | 1 + .../lucene-analyzers-common-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-analyzers-common-7.4.0.jar.sha1 | 1 + .../lucene-backward-codecs-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-backward-codecs-7.4.0.jar.sha1 | 1 + server/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-core-7.4.0.jar.sha1 | 1 + .../licenses/lucene-grouping-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-grouping-7.4.0.jar.sha1 | 1 + .../lucene-highlighter-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-highlighter-7.4.0.jar.sha1 | 1 + server/licenses/lucene-join-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-join-7.4.0.jar.sha1 | 1 + .../licenses/lucene-memory-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-memory-7.4.0.jar.sha1 | 1 + 
server/licenses/lucene-misc-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-misc-7.4.0.jar.sha1 | 1 + .../licenses/lucene-queries-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-queries-7.4.0.jar.sha1 | 1 + .../lucene-queryparser-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-queryparser-7.4.0.jar.sha1 | 1 + .../licenses/lucene-sandbox-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-sandbox-7.4.0.jar.sha1 | 1 + .../licenses/lucene-spatial-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-spatial-7.4.0.jar.sha1 | 1 + .../lucene-spatial-extras-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-spatial-extras-7.4.0.jar.sha1 | 1 + .../lucene-spatial3d-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-spatial3d-7.4.0.jar.sha1 | 1 + .../licenses/lucene-suggest-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-suggest-7.4.0.jar.sha1 | 1 + .../licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 | 1 - x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0.jar.sha1 | 1 + 49 files changed, 25 insertions(+), 25 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-7.4.0.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0.jar.sha1 delete mode 100644 
plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-analyzers-common-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-analyzers-common-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-core-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-grouping-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-join-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-join-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-memory-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-memory-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-misc-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 
server/licenses/lucene-misc-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-queries-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-queries-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-spatial-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-suggest-7.4.0.jar.sha1 delete mode 100644 x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index d89ffa78ed8..17e5cb5ff01 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 7.0.0-alpha1 -lucene = 7.4.0-snapshot-518d303506 +lucene = 7.4.0 # optional dependencies spatial4j = 0.7 diff --git a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-518d303506.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 2e666a2d566..00000000000 --- a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-a57659a275921d8ab3f7ec580e9bf713ce6143b1 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-7.4.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.4.0.jar.sha1 new file mode 100644 index 00000000000..2b14a61f264 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-7.4.0.jar.sha1 @@ -0,0 +1 @@ +9f0a326f7ec1671ffb07f95b27f1a5812b7dc1c3 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 03f1b7d27ae..00000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b91a260d8d12ee4b3302a63059c73a34de0ce146 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0.jar.sha1 new file mode 100644 index 00000000000..b5291b30c7d --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0.jar.sha1 @@ -0,0 +1 @@ +394e811e9d9bf0b9fba837f7ceca9e8f3e39d1c2 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 9a5c6669009..00000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cc1ca9bd9e2c162dd1da8c2e7111913fd8033e48 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0.jar.sha1 new file mode 100644 index 00000000000..49f55bea5e6 --- /dev/null +++ 
b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0.jar.sha1 @@ -0,0 +1 @@ +5cd56acfa16ba20e19b5d21d90b510eada841431 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index cbf4f78c319..00000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2fa3662a10a9e085b1c7b87293d727422cbe6224 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0.jar.sha1 new file mode 100644 index 00000000000..c4b61b763b4 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0.jar.sha1 @@ -0,0 +1 @@ +db7b56f4cf533ad9022d2312c5ee48331edccca3 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index bd5bf428b6d..00000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -60aa50c11857e6739e68936cb45102562b2c46b4 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0.jar.sha1 new file mode 100644 index 00000000000..779cac97612 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0.jar.sha1 @@ -0,0 +1 @@ +e8dba4d28a595eab2e8fb6095d1ac5f2d3872144 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-518d303506.jar.sha1 
b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index a73900802ac..00000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4586368007785a3be26db4b9ce404ffb8c76f350 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0.jar.sha1 new file mode 100644 index 00000000000..cf5c49a2759 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0.jar.sha1 @@ -0,0 +1 @@ +1243c771ee824c46a3d66ae3e4256d919fc06fbe \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index bf0a50f7154..00000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9c6d030ab2c148df7a6ba73a774ef4b8c720a6cb \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0.jar.sha1 new file mode 100644 index 00000000000..830b9ccf9cb --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0.jar.sha1 @@ -0,0 +1 @@ +c783794b0d20d8dc1285edc7701f386b1f0e2fb8 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index ba6ceb2aed9..00000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-8275bf8df2644d5fcec2963cf237d14b6e00fefe \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0.jar.sha1 new file mode 100644 index 00000000000..a96e05f5e3b --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0.jar.sha1 @@ -0,0 +1 @@ +9438efa504a89afb6cb4c66448c257f865164d23 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 4c0db7a735c..00000000000 --- a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -557d62d2b13d3dcb1810a1633e22625e42425425 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.4.0.jar.sha1 b/server/licenses/lucene-analyzers-common-7.4.0.jar.sha1 new file mode 100644 index 00000000000..928cc6dea04 --- /dev/null +++ b/server/licenses/lucene-analyzers-common-7.4.0.jar.sha1 @@ -0,0 +1 @@ +e1afb580df500626a1c695e0fc9a7e8a8f58bcac \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 0579316096a..00000000000 --- a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d3755ad4c98b49fe5055b32358e3071727177c03 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.4.0.jar.sha1 b/server/licenses/lucene-backward-codecs-7.4.0.jar.sha1 new file mode 100644 index 00000000000..a94663119e7 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-7.4.0.jar.sha1 @@ -0,0 +1 @@ +a6ad941ef1fdad48673ed511631b7e48a9456bf7 \ No newline at end of file diff --git 
a/server/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 134072bc137..00000000000 --- a/server/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c1bbf611535f0b0fd0ba14e8da67c8d645b95244 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.4.0.jar.sha1 b/server/licenses/lucene-core-7.4.0.jar.sha1 new file mode 100644 index 00000000000..80ba6c76aa3 --- /dev/null +++ b/server/licenses/lucene-core-7.4.0.jar.sha1 @@ -0,0 +1 @@ +730d9ac80436c8cbc0b2a8a749259be536b97316 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-grouping-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 8a3327cc8a2..00000000000 --- a/server/licenses/lucene-grouping-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b62ebd53bbefb2f59cd246157a6768cae8a5a3a1 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.4.0.jar.sha1 b/server/licenses/lucene-grouping-7.4.0.jar.sha1 new file mode 100644 index 00000000000..5b781d26829 --- /dev/null +++ b/server/licenses/lucene-grouping-7.4.0.jar.sha1 @@ -0,0 +1 @@ +56f99858a4421a517b52da36a222debcccab80c6 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-highlighter-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 75fb5a77556..00000000000 --- a/server/licenses/lucene-highlighter-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cba0fd4ccb98db8a72287a95d6b653e455f9eeb3 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.4.0.jar.sha1 b/server/licenses/lucene-highlighter-7.4.0.jar.sha1 new file mode 100644 index 00000000000..e1ebb95fe1b --- /dev/null +++ b/server/licenses/lucene-highlighter-7.4.0.jar.sha1 @@ -0,0 +1 @@ 
+5266b45d7f049662817d739881765904621876d0 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-join-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 01e0197bc17..00000000000 --- a/server/licenses/lucene-join-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5127ed0b7516f8b28d84e837df4f33c67e361f6c \ No newline at end of file diff --git a/server/licenses/lucene-join-7.4.0.jar.sha1 b/server/licenses/lucene-join-7.4.0.jar.sha1 new file mode 100644 index 00000000000..ff81c33c3f8 --- /dev/null +++ b/server/licenses/lucene-join-7.4.0.jar.sha1 @@ -0,0 +1 @@ +c77154d18c4944ceb6ce0741060632f57d623fdc \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-memory-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 3d6069f2a5c..00000000000 --- a/server/licenses/lucene-memory-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -45c7b13aae1104f9f5f0fca0606e5741309c8d74 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.4.0.jar.sha1 b/server/licenses/lucene-memory-7.4.0.jar.sha1 new file mode 100644 index 00000000000..7c0117dff6b --- /dev/null +++ b/server/licenses/lucene-memory-7.4.0.jar.sha1 @@ -0,0 +1 @@ +186ff981feec1bdbf1a6236e786ec171b5fbe3e0 \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-misc-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index a74be59aea3..00000000000 --- a/server/licenses/lucene-misc-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2540c4b5d9dca8a39a3b4d58efe4ab484df7254f \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.4.0.jar.sha1 b/server/licenses/lucene-misc-7.4.0.jar.sha1 new file mode 100644 index 00000000000..5cdf6810fa5 --- /dev/null +++ b/server/licenses/lucene-misc-7.4.0.jar.sha1 @@ -0,0 
+1 @@ +bf844bb6f6d84da19e8c79ce5fbb4cf6d00f2611 \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-queries-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index cf26412b63f..00000000000 --- a/server/licenses/lucene-queries-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e9d0c0c020917d4bf9b590526866ff5547dbaa17 \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.4.0.jar.sha1 b/server/licenses/lucene-queries-7.4.0.jar.sha1 new file mode 100644 index 00000000000..19889037937 --- /dev/null +++ b/server/licenses/lucene-queries-7.4.0.jar.sha1 @@ -0,0 +1 @@ +229a50e6d9d4db076f671c230d493000c6e2972c \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-queryparser-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 63533b77467..00000000000 --- a/server/licenses/lucene-queryparser-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -50969cdb7279047fbec94dda6e7d74d1c73c07f8 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.4.0.jar.sha1 b/server/licenses/lucene-queryparser-7.4.0.jar.sha1 new file mode 100644 index 00000000000..afdc275afe2 --- /dev/null +++ b/server/licenses/lucene-queryparser-7.4.0.jar.sha1 @@ -0,0 +1 @@ +8e58add0d0c39df97d07c8e343041989bf4b3a3f \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-sandbox-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 4eab31d62bd..00000000000 --- a/server/licenses/lucene-sandbox-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -94524b293572b1f0d01a0faeeade1ff24713f966 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.4.0.jar.sha1 b/server/licenses/lucene-sandbox-7.4.0.jar.sha1 new file mode 100644 index 00000000000..81ae3bddd07 --- 
/dev/null +++ b/server/licenses/lucene-sandbox-7.4.0.jar.sha1 @@ -0,0 +1 @@ +1692604fa06a945d1ee19939022ef1a912235db3 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-spatial-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index ae5a2ea0375..00000000000 --- a/server/licenses/lucene-spatial-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -878db723e41ece636ed338c4ef374e900f221a14 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.4.0.jar.sha1 b/server/licenses/lucene-spatial-7.4.0.jar.sha1 new file mode 100644 index 00000000000..cc3f31340b9 --- /dev/null +++ b/server/licenses/lucene-spatial-7.4.0.jar.sha1 @@ -0,0 +1 @@ +847d2f897961124e2fc7d5e55d8309635bb026bc \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 9f5129d8905..00000000000 --- a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c8dc85c32aeac6ff320aa6a9ea57881ad4847a55 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.4.0.jar.sha1 b/server/licenses/lucene-spatial-extras-7.4.0.jar.sha1 new file mode 100644 index 00000000000..3f05790e430 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-7.4.0.jar.sha1 @@ -0,0 +1 @@ +586892eefc0546643d7f5d7f83659c7db0d534ff \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-spatial3d-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 02fcef681fc..00000000000 --- a/server/licenses/lucene-spatial3d-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -203d8d22ab172e624784a5fdeaecdd01ae25fb3d \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.4.0.jar.sha1 
b/server/licenses/lucene-spatial3d-7.4.0.jar.sha1 new file mode 100644 index 00000000000..8c767b16c53 --- /dev/null +++ b/server/licenses/lucene-spatial3d-7.4.0.jar.sha1 @@ -0,0 +1 @@ +32cd2854f39ff453a5d128ce40e11eea4168abbf \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-suggest-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index a7daa7ff02a..00000000000 --- a/server/licenses/lucene-suggest-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d6cf8fa1064a86991d5cd12a2ed32119ac91212 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.4.0.jar.sha1 b/server/licenses/lucene-suggest-7.4.0.jar.sha1 new file mode 100644 index 00000000000..59d59cf7941 --- /dev/null +++ b/server/licenses/lucene-suggest-7.4.0.jar.sha1 @@ -0,0 +1 @@ +0cdc1a512032f8b23dd4b1add0f5cd06325addc3 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 134072bc137..00000000000 --- a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c1bbf611535f0b0fd0ba14e8da67c8d645b95244 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0.jar.sha1 new file mode 100644 index 00000000000..80ba6c76aa3 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0.jar.sha1 @@ -0,0 +1 @@ +730d9ac80436c8cbc0b2a8a749259be536b97316 \ No newline at end of file From 59e7c6411a04f08a325f02d612e12eab12b22316 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 22 Jun 2018 07:36:03 -0700 Subject: [PATCH 5/8] Core: Combine messageRecieved methods in TransportRequestHandler (#31519) TransportRequestHandler currently contains 2 
messageReceived methods, one which takes a Task, and one that does not. The first just delegates to the second. This commit changes all existing implementors of TransportRequestHandler to implement the version which takes Task, thus allowing the class to be a functional interface, and eliminating the need to throw exceptions when a task needs to be ensured. --- .../netty4/Netty4ScheduledPingTests.java | 3 +- ...rossClusterSearchUnavailableClusterIT.java | 4 +- .../liveness/TransportLivenessAction.java | 3 +- .../cancel/TransportCancelTasksAction.java | 3 +- .../action/search/SearchTransportService.java | 165 +++++++----------- .../support/HandledTransportAction.java | 5 - .../broadcast/TransportBroadcastAction.java | 5 - .../node/TransportBroadcastByNodeAction.java | 2 +- .../support/nodes/TransportNodesAction.java | 6 - .../TransportReplicationAction.java | 16 -- ...ransportInstanceSingleOperationAction.java | 3 +- .../shard/TransportSingleShardAction.java | 5 +- .../support/tasks/TransportTasksAction.java | 2 +- .../index/NodeMappingRefreshAction.java | 3 +- .../action/shard/ShardStateAction.java | 5 +- .../discovery/zen/MasterFaultDetection.java | 3 +- .../discovery/zen/MembershipAction.java | 7 +- .../discovery/zen/NodesFaultDetection.java | 3 +- .../zen/PublishClusterStateAction.java | 5 +- .../discovery/zen/UnicastZenPing.java | 3 +- .../discovery/zen/ZenDiscovery.java | 3 +- .../gateway/LocalAllocateDangledIndices.java | 3 +- .../indices/flush/SyncedFlushService.java | 7 +- .../recovery/PeerRecoverySourceService.java | 3 +- .../recovery/PeerRecoveryTargetService.java | 22 ++- .../indices/store/IndicesStore.java | 3 +- .../VerifyNodeRepositoryAction.java | 3 +- .../transport/RequestHandlerRegistry.java | 2 +- .../TaskAwareTransportRequestHandler.java | 30 ---- .../transport/TransportActionProxy.java | 3 +- .../transport/TransportRequestHandler.java | 9 +- .../transport/TransportService.java | 2 +- .../action/IndicesRequestIT.java | 5 - 
.../TransportBroadcastByNodeActionTests.java | 2 +- .../TransportClientNodesServiceTests.java | 3 +- .../discovery/zen/ZenDiscoveryUnitTests.java | 4 +- .../RemoteClusterConnectionTests.java | 4 +- .../transport/TransportActionProxyTests.java | 14 +- .../AbstractSimpleTransportTestCase.java | 63 +++---- .../action/TransportRollupSearchAction.java | 5 - .../SecurityServerTransportInterceptor.java | 5 - 41 files changed, 175 insertions(+), 271 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/transport/TaskAwareTransportRequestHandler.java diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java index b967a7ea410..bd62ff0af0b 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; @@ -91,7 +92,7 @@ public class Netty4ScheduledPingTests extends ESTestCase { serviceA.registerRequestHandler("sayHello", TransportRequest.Empty::new, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override - public void messageReceived(TransportRequest.Empty request, TransportChannel channel) { + public void messageReceived(TransportRequest.Empty request, TransportChannel channel, Task task) { try { channel.sendResponse(TransportResponse.Empty.INSTANCE, TransportResponseOptions.EMPTY); } catch 
(IOException e) { diff --git a/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java index 73df782c920..29aec900cef 100644 --- a/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -103,12 +103,12 @@ public class CrossClusterSearchUnavailableClusterIT extends ESRestTestCase { MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, null); try { newService.registerRequestHandler(ClusterSearchShardsAction.NAME, ThreadPool.Names.SAME, ClusterSearchShardsRequest::new, - (request, channel) -> { + (request, channel, task) -> { channel.sendResponse(new ClusterSearchShardsResponse(new ClusterSearchShardsGroup[0], knownNodes.toArray(new DiscoveryNode[0]), Collections.emptyMap())); }); newService.registerRequestHandler(ClusterStateAction.NAME, ThreadPool.Names.SAME, ClusterStateRequest::new, - (request, channel) -> { + (request, channel, task) -> { DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); for (DiscoveryNode node : knownNodes) { builder.add(node); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/TransportLivenessAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/TransportLivenessAction.java index 09c608ac842..ef8014cade4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/TransportLivenessAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/TransportLivenessAction.java @@ -21,6 +21,7 @@ package org.elasticsearch.action.admin.cluster.node.liveness; import org.elasticsearch.cluster.service.ClusterService; import 
org.elasticsearch.common.inject.Inject; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequestHandler; @@ -39,7 +40,7 @@ public final class TransportLivenessAction implements TransportRequestHandler
  • { @Override - public void messageReceived(final BanParentTaskRequest request, final TransportChannel channel) throws Exception { + public void messageReceived(final BanParentTaskRequest request, final TransportChannel channel, Task task) throws Exception { if (request.ban) { logger.debug("Received ban for the parent [{}] on the node [{}], reason: [{}]", request.parentTaskId, clusterService.localNode().getId(), request.reason); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 8a4c8b0882f..dd43b82f8b8 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -45,13 +45,10 @@ import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.query.ScrollQuerySearchResult; -import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterService; -import org.elasticsearch.transport.TaskAwareTransportRequestHandler; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportActionProxy; -import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; @@ -314,150 +311,116 @@ public class SearchTransportService extends AbstractComponent { public static void registerRequestHandler(TransportService transportService, SearchService searchService) { transportService.registerRequestHandler(FREE_CONTEXT_SCROLL_ACTION_NAME, ThreadPool.Names.SAME, ScrollFreeContextRequest::new, - new TaskAwareTransportRequestHandler() { - 
@Override - public void messageReceived(ScrollFreeContextRequest request, TransportChannel channel, Task task) throws Exception { - boolean freed = searchService.freeContext(request.id()); - channel.sendResponse(new SearchFreeContextResponse(freed)); - } - }); + (request, channel, task) -> { + boolean freed = searchService.freeContext(request.id()); + channel.sendResponse(new SearchFreeContextResponse(freed)); + }); TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_SCROLL_ACTION_NAME, (Supplier) SearchFreeContextResponse::new); transportService.registerRequestHandler(FREE_CONTEXT_ACTION_NAME, ThreadPool.Names.SAME, SearchFreeContextRequest::new, - new TaskAwareTransportRequestHandler() { - @Override - public void messageReceived(SearchFreeContextRequest request, TransportChannel channel, Task task) throws Exception { - boolean freed = searchService.freeContext(request.id()); - channel.sendResponse(new SearchFreeContextResponse(freed)); - } - }); + (request, channel, task) -> { + boolean freed = searchService.freeContext(request.id()); + channel.sendResponse(new SearchFreeContextResponse(freed)); + }); TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_ACTION_NAME, (Supplier) SearchFreeContextResponse::new); transportService.registerRequestHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, () -> TransportRequest.Empty.INSTANCE, - ThreadPool.Names.SAME, new TaskAwareTransportRequestHandler() { - @Override - public void messageReceived(TransportRequest.Empty request, TransportChannel channel, Task task) throws Exception { - searchService.freeAllScrollContexts(); - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } - }); + ThreadPool.Names.SAME, (request, channel, task) -> { + searchService.freeAllScrollContexts(); + channel.sendResponse(TransportResponse.Empty.INSTANCE); + }); TransportActionProxy.registerProxyAction(transportService, CLEAR_SCROLL_CONTEXTS_ACTION_NAME, () -> TransportResponse.Empty.INSTANCE); 
transportService.registerRequestHandler(DFS_ACTION_NAME, ThreadPool.Names.SAME, ShardSearchTransportRequest::new, - new TaskAwareTransportRequestHandler() { - @Override - public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel, Task task) throws Exception { - searchService.executeDfsPhase(request, (SearchTask) task, new ActionListener() { - @Override - public void onResponse(SearchPhaseResult searchPhaseResult) { - try { - channel.sendResponse(searchPhaseResult); - } catch (IOException e) { - throw new UncheckedIOException(e); - } + (request, channel, task) -> { + searchService.executeDfsPhase(request, (SearchTask) task, new ActionListener() { + @Override + public void onResponse(SearchPhaseResult searchPhaseResult) { + try { + channel.sendResponse(searchPhaseResult); + } catch (IOException e) { + throw new UncheckedIOException(e); } + } - @Override - public void onFailure(Exception e) { - try { - channel.sendResponse(e); - } catch (IOException e1) { - throw new UncheckedIOException(e1); - } + @Override + public void onFailure(Exception e) { + try { + channel.sendResponse(e); + } catch (IOException e1) { + throw new UncheckedIOException(e1); } - }); - - } + } + }); }); TransportActionProxy.registerProxyAction(transportService, DFS_ACTION_NAME, DfsSearchResult::new); transportService.registerRequestHandler(QUERY_ACTION_NAME, ThreadPool.Names.SAME, ShardSearchTransportRequest::new, - new TaskAwareTransportRequestHandler() { - @Override - public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel, Task task) throws Exception { - searchService.executeQueryPhase(request, (SearchTask) task, new ActionListener() { - @Override - public void onResponse(SearchPhaseResult searchPhaseResult) { - try { - channel.sendResponse(searchPhaseResult); - } catch (IOException e) { - throw new UncheckedIOException(e); - } + (request, channel, task) -> { + searchService.executeQueryPhase(request, (SearchTask) task, new 
ActionListener() { + @Override + public void onResponse(SearchPhaseResult searchPhaseResult) { + try { + channel.sendResponse(searchPhaseResult); + } catch (IOException e) { + throw new UncheckedIOException(e); } + } - @Override - public void onFailure(Exception e) { - try { - channel.sendResponse(e); - } catch (IOException e1) { - throw new UncheckedIOException(e1); - } + @Override + public void onFailure(Exception e) { + try { + channel.sendResponse(e); + } catch (IOException e1) { + throw new UncheckedIOException(e1); } - }); - } + } + }); }); TransportActionProxy.registerProxyAction(transportService, QUERY_ACTION_NAME, (request) -> ((ShardSearchRequest)request).numberOfShards() == 1 ? QueryFetchSearchResult::new : QuerySearchResult::new); transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, ThreadPool.Names.SEARCH, QuerySearchRequest::new, - new TaskAwareTransportRequestHandler() { - @Override - public void messageReceived(QuerySearchRequest request, TransportChannel channel, Task task) throws Exception { - QuerySearchResult result = searchService.executeQueryPhase(request, (SearchTask)task); - channel.sendResponse(result); - } + (request, channel, task) -> { + QuerySearchResult result = searchService.executeQueryPhase(request, (SearchTask)task); + channel.sendResponse(result); }); TransportActionProxy.registerProxyAction(transportService, QUERY_ID_ACTION_NAME, QuerySearchResult::new); transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, ThreadPool.Names.SEARCH, InternalScrollSearchRequest::new, - new TaskAwareTransportRequestHandler() { - @Override - public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel, Task task) throws Exception { - ScrollQuerySearchResult result = searchService.executeQueryPhase(request, (SearchTask)task); - channel.sendResponse(result); - } + (request, channel, task) -> { + ScrollQuerySearchResult result = searchService.executeQueryPhase(request, (SearchTask)task); + 
channel.sendResponse(result); }); TransportActionProxy.registerProxyAction(transportService, QUERY_SCROLL_ACTION_NAME, ScrollQuerySearchResult::new); transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, ThreadPool.Names.SEARCH, InternalScrollSearchRequest::new, - new TaskAwareTransportRequestHandler() { - @Override - public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel, Task task) throws Exception { - ScrollQueryFetchSearchResult result = searchService.executeFetchPhase(request, (SearchTask)task); - channel.sendResponse(result); - } + (request, channel, task) -> { + ScrollQueryFetchSearchResult result = searchService.executeFetchPhase(request, (SearchTask)task); + channel.sendResponse(result); }); TransportActionProxy.registerProxyAction(transportService, QUERY_FETCH_SCROLL_ACTION_NAME, ScrollQueryFetchSearchResult::new); transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ThreadPool.Names.SEARCH, ShardFetchRequest::new, - new TaskAwareTransportRequestHandler() { - @Override - public void messageReceived(ShardFetchRequest request, TransportChannel channel, Task task) throws Exception { - FetchSearchResult result = searchService.executeFetchPhase(request, (SearchTask)task); - channel.sendResponse(result); - } + (request, channel, task) -> { + FetchSearchResult result = searchService.executeFetchPhase(request, (SearchTask)task); + channel.sendResponse(result); }); TransportActionProxy.registerProxyAction(transportService, FETCH_ID_SCROLL_ACTION_NAME, FetchSearchResult::new); transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ThreadPool.Names.SEARCH, ShardFetchSearchRequest::new, - new TaskAwareTransportRequestHandler() { - @Override - public void messageReceived(ShardFetchSearchRequest request, TransportChannel channel, Task task) throws Exception { - FetchSearchResult result = searchService.executeFetchPhase(request, (SearchTask)task); - channel.sendResponse(result); - } + (request, 
channel, task) -> { + FetchSearchResult result = searchService.executeFetchPhase(request, (SearchTask)task); + channel.sendResponse(result); }); TransportActionProxy.registerProxyAction(transportService, FETCH_ID_ACTION_NAME, FetchSearchResult::new); // this is cheap, it does not fetch during the rewrite phase, so we can let it quickly execute on a networking thread transportService.registerRequestHandler(QUERY_CAN_MATCH_NAME, ThreadPool.Names.SAME, ShardSearchTransportRequest::new, - new TaskAwareTransportRequestHandler() { - @Override - public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel, Task task) throws Exception { - boolean canMatch = searchService.canMatch(request); - channel.sendResponse(new CanMatchResponse(canMatch)); - } + (request, channel, task) -> { + boolean canMatch = searchService.canMatch(request); + channel.sendResponse(new CanMatchResponse(canMatch)); }); TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NAME, (Supplier) CanMatchResponse::new); diff --git a/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java b/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java index 7cdcd017b99..c55e0cff6f2 100644 --- a/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java @@ -64,11 +64,6 @@ public abstract class HandledTransportAction { - @Override - public final void messageReceived(Request request, TransportChannel channel) throws Exception { - throw new UnsupportedOperationException("the task parameter is required for this operation"); - } - @Override public final void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception { // We already got the task created on the network layer - no need to create it again on the transport layer diff --git 
a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java index 8a28c2c9d89..1bec46fd121 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java @@ -284,10 +284,5 @@ public abstract class TransportBroadcastAction { @Override - public void messageReceived(final NodeRequest request, TransportChannel channel) throws Exception { + public void messageReceived(final NodeRequest request, TransportChannel channel, Task task) throws Exception { List shards = request.getShards(); final int totalShards = shards.size(); if (logger.isTraceEnabled()) { diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java index 7a074c91c71..6a9ac53f7be 100644 --- a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java @@ -258,12 +258,6 @@ public abstract class TransportNodesAction> { @@ -286,11 +281,6 @@ public abstract class TransportReplicationAction< } - @Override - public void messageReceived(final ConcreteShardRequest request, final TransportChannel channel) throws Exception { - throw new UnsupportedOperationException("the task parameter is required for this operation"); - } - @Override public void messageReceived(ConcreteShardRequest request, TransportChannel channel, Task task) { new AsyncPrimaryAction(request.request, request.targetAllocationID, request.primaryTerm, channel, (ReplicationTask) task).run(); @@ -493,12 +483,6 @@ public abstract class TransportReplicationAction< public class ReplicaOperationTransportHandler implements TransportRequestHandler> 
{ - @Override - public void messageReceived( - final ConcreteReplicaRequest replicaRequest, final TransportChannel channel) throws Exception { - throw new UnsupportedOperationException("the task parameter is required for this operation"); - } - @Override public void messageReceived( final ConcreteReplicaRequest replicaRequest, diff --git a/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java b/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java index 280a35207a9..2d8ccb6e524 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.node.NodeClosedException; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TransportChannel; @@ -243,7 +244,7 @@ public abstract class TransportInstanceSingleOperationAction { @Override - public void messageReceived(final Request request, final TransportChannel channel) throws Exception { + public void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception { shardOperation(request, new ActionListener() { @Override public void onResponse(Response response) { diff --git a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java index d7e5633559d..7116061640f 100644 --- 
a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; @@ -271,7 +272,7 @@ public abstract class TransportSingleShardAction { @Override - public void messageReceived(Request request, final TransportChannel channel) throws Exception { + public void messageReceived(Request request, final TransportChannel channel, Task task) throws Exception { // if we have a local operation, execute it on a thread since we don't spawn execute(request, new ActionListener() { @Override @@ -298,7 +299,7 @@ public abstract class TransportSingleShardAction { @Override - public void messageReceived(final Request request, final TransportChannel channel) throws Exception { + public void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception { if (logger.isTraceEnabled()) { logger.trace("executing [{}] on shard [{}]", request, request.internalShardId); } diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index 5599dd5f98b..ee116d9f957 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -338,7 +338,7 @@ public abstract class TransportTasksAction< class NodeTransportHandler implements 
TransportRequestHandler { @Override - public void messageReceived(final NodeTaskRequest request, final TransportChannel channel) throws Exception { + public void messageReceived(final NodeTaskRequest request, final TransportChannel channel, Task task) throws Exception { nodeOperation(request, new ActionListener() { @Override public void onResponse( diff --git a/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java b/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java index fc7a4206486..2559c14848d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.EmptyTransportResponseHandler; import org.elasticsearch.transport.TransportChannel; @@ -65,7 +66,7 @@ public class NodeMappingRefreshAction extends AbstractComponent { private class NodeMappingRefreshTransportHandler implements TransportRequestHandler { @Override - public void messageReceived(NodeMappingRefreshRequest request, TransportChannel channel) throws Exception { + public void messageReceived(NodeMappingRefreshRequest request, TransportChannel channel, Task task) throws Exception { metaDataMappingService.refreshMapping(request.index(), request.indexUUID()); channel.sendResponse(TransportResponse.Empty.INSTANCE); } diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index f690efa4c9a..0949e47cd05 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -52,6 +52,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.node.NodeClosedException; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.EmptyTransportResponseHandler; @@ -237,7 +238,7 @@ public class ShardStateAction extends AbstractComponent { } @Override - public void messageReceived(FailedShardEntry request, TransportChannel channel) throws Exception { + public void messageReceived(FailedShardEntry request, TransportChannel channel, Task task) throws Exception { logger.debug(() -> new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure); clusterService.submitStateUpdateTask( "shard-failed", @@ -487,7 +488,7 @@ public class ShardStateAction extends AbstractComponent { } @Override - public void messageReceived(StartedShardEntry request, TransportChannel channel) throws Exception { + public void messageReceived(StartedShardEntry request, TransportChannel channel, Task task) throws Exception { logger.debug("{} received shard started for [{}]", request.shardId, request); clusterService.submitStateUpdateTask( "shard-started " + request, diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java b/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java index c38cfe88619..5acf2effad3 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java @@ -34,6 +34,7 @@ import 
org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TransportChannel; @@ -321,7 +322,7 @@ public class MasterFaultDetection extends FaultDetection { private class MasterPingRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(final MasterPingRequest request, final TransportChannel channel) throws Exception { + public void messageReceived(final MasterPingRequest request, final TransportChannel channel, Task task) throws Exception { final DiscoveryNodes nodes = clusterStateSupplier.get().nodes(); // check if we are really the same master as the one we seemed to be think we are // this can happen if the master got "kill -9" and then another node started using the same port diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java b/server/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java index fdfcd8ac290..e8bafea66d3 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.EmptyTransportResponseHandler; import org.elasticsearch.transport.TransportChannel; @@ -133,7 +134,7 @@ public class MembershipAction extends AbstractComponent { private class JoinRequestRequestHandler 
implements TransportRequestHandler { @Override - public void messageReceived(final JoinRequest request, final TransportChannel channel) throws Exception { + public void messageReceived(final JoinRequest request, final TransportChannel channel, Task task) throws Exception { listener.onJoin(request.node, new JoinCallback() { @Override public void onSuccess() { @@ -190,7 +191,7 @@ public class MembershipAction extends AbstractComponent { } @Override - public void messageReceived(ValidateJoinRequest request, TransportChannel channel) throws Exception { + public void messageReceived(ValidateJoinRequest request, TransportChannel channel, Task task) throws Exception { DiscoveryNode node = localNodeSupplier.get(); assert node != null : "local node is null"; joinValidators.stream().forEach(action -> action.accept(node, request.state)); @@ -281,7 +282,7 @@ public class MembershipAction extends AbstractComponent { private class LeaveRequestRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(LeaveRequest request, TransportChannel channel) throws Exception { + public void messageReceived(LeaveRequest request, TransportChannel channel, Task task) throws Exception { listener.onLeave(request.node); channel.sendResponse(TransportResponse.Empty.INSTANCE); } diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java b/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java index d19cc98441b..57e5cab020b 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.tasks.Task; import 
org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TransportChannel; @@ -276,7 +277,7 @@ public class NodesFaultDetection extends FaultDetection { class PingRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(PingRequest request, TransportChannel channel) throws Exception { + public void messageReceived(PingRequest request, TransportChannel channel, Task task) throws Exception { // if we are not the node we are supposed to be pinged, send an exception // this can happen when a kill -9 is sent, and another node is started using the same port if (!localNode.equals(request.targetNode())) { diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java b/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java index 5398b2a057a..5e9f960e893 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java @@ -45,6 +45,7 @@ import org.elasticsearch.discovery.AckClusterStatePublishResponseHandler; import org.elasticsearch.discovery.BlockingClusterStatePublishResponseHandler; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.BytesTransportRequest; import org.elasticsearch.transport.EmptyTransportResponseHandler; @@ -447,14 +448,14 @@ public class PublishClusterStateAction extends AbstractComponent { private class SendClusterStateRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(BytesTransportRequest request, final TransportChannel channel) throws Exception { + public void messageReceived(BytesTransportRequest request, final TransportChannel channel, Task task) 
throws Exception { handleIncomingClusterStateRequest(request, channel); } } private class CommitClusterStateRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(CommitClusterStateRequest request, final TransportChannel channel) throws Exception { + public void messageReceived(CommitClusterStateRequest request, final TransportChannel channel, Task task) throws Exception { handleCommitRequest(request, channel); } } diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java index 9c86fa17e9b..74414dc446e 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java @@ -45,6 +45,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.common.util.concurrent.KeyedLock; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.ConnectionProfile; @@ -563,7 +564,7 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { class UnicastPingRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(UnicastPingRequest request, TransportChannel channel) throws Exception { + public void messageReceived(UnicastPingRequest request, TransportChannel channel, Task task) throws Exception { if (closed) { throw new AlreadyClosedException("node is shutting down"); } diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 55ecf7ca25f..eb9a9f8d488 100644 --- 
a/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -56,6 +56,7 @@ import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.DiscoveryStats; import org.elasticsearch.discovery.zen.PublishClusterStateAction.IncomingClusterStateListener; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.EmptyTransportResponseHandler; import org.elasticsearch.transport.TransportChannel; @@ -1187,7 +1188,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover class RejoinClusterRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(final RejoinClusterRequest request, final TransportChannel channel) throws Exception { + public void messageReceived(final RejoinClusterRequest request, final TransportChannel channel, Task task) throws Exception { try { channel.sendResponse(TransportResponse.Empty.INSTANCE); } catch (Exception e) { diff --git a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index c8986b04934..7bc2e38dde0 100644 --- a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.MasterNotDiscoveredException; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; @@ -112,7 +113,7 @@ public class 
LocalAllocateDangledIndices extends AbstractComponent { class AllocateDangledRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(final AllocateDangledRequest request, final TransportChannel channel) throws Exception { + public void messageReceived(final AllocateDangledRequest request, final TransportChannel channel, Task task) throws Exception { String[] indexNames = new String[request.indices.length]; for (int i = 0; i < request.indices.length; i++) { indexNames[i] = request.indices[i].getIndex().getName(); diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index 6ef6c1546d1..f01b4bb3121 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -54,6 +54,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; @@ -778,7 +779,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL private final class PreSyncedFlushTransportHandler implements TransportRequestHandler { @Override - public void messageReceived(PreShardSyncedFlushRequest request, TransportChannel channel) throws Exception { + public void messageReceived(PreShardSyncedFlushRequest request, TransportChannel channel, Task task) throws Exception { channel.sendResponse(performPreSyncedFlush(request)); } } @@ -786,7 +787,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL private final class SyncedFlushTransportHandler implements 
TransportRequestHandler { @Override - public void messageReceived(ShardSyncedFlushRequest request, TransportChannel channel) throws Exception { + public void messageReceived(ShardSyncedFlushRequest request, TransportChannel channel, Task task) throws Exception { channel.sendResponse(performSyncedFlush(request)); } } @@ -794,7 +795,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL private final class InFlightOpCountTransportHandler implements TransportRequestHandler { @Override - public void messageReceived(InFlightOpsRequest request, TransportChannel channel) throws Exception { + public void messageReceived(InFlightOpsRequest request, TransportChannel channel, Task task) throws Exception { channel.sendResponse(performInFlightOps(request)); } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java index 51eabdd4e8c..06e8a5734f6 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java @@ -30,6 +30,7 @@ import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequestHandler; @@ -103,7 +104,7 @@ public class PeerRecoverySourceService extends AbstractComponent implements Inde class StartRecoveryTransportRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(final StartRecoveryRequest request, final TransportChannel channel) throws Exception { + public void messageReceived(final StartRecoveryRequest request, 
final TransportChannel channel, Task task) throws Exception { RecoveryResponse response = recover(request); channel.sendResponse(response); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index cb49eed25f8..aaa4697e5cb 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -55,6 +55,7 @@ import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogCorruptedException; import org.elasticsearch.indices.recovery.RecoveriesCollection.RecoveryRef; import org.elasticsearch.node.NodeClosedException; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.FutureTransportResponseHandler; @@ -397,7 +398,8 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde class PrepareForTranslogOperationsRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel) throws Exception { + public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel, + Task task) throws Exception { try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() )) { recoveryRef.target().prepareForTranslogOperations(request.isFileBasedRecovery(), request.totalTranslogOps()); @@ -409,7 +411,7 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde class FinalizeRecoveryRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(RecoveryFinalizeRecoveryRequest request, TransportChannel 
channel) throws Exception { + public void messageReceived(RecoveryFinalizeRecoveryRequest request, TransportChannel channel, Task task) throws Exception { try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) { recoveryRef.target().finalizeRecovery(request.globalCheckpoint()); @@ -421,7 +423,7 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde class WaitForClusterStateRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(RecoveryWaitForClusterStateRequest request, TransportChannel channel) throws Exception { + public void messageReceived(RecoveryWaitForClusterStateRequest request, TransportChannel channel, Task task) throws Exception { try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() )) { recoveryRef.target().ensureClusterStateVersion(request.clusterStateVersion()); @@ -433,7 +435,8 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde class HandoffPrimaryContextRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(final RecoveryHandoffPrimaryContextRequest request, final TransportChannel channel) throws Exception { + public void messageReceived(final RecoveryHandoffPrimaryContextRequest request, final TransportChannel channel, + Task task) throws Exception { try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) { recoveryRef.target().handoffPrimaryContext(request.primaryContext()); } @@ -445,7 +448,8 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde class TranslogOperationsRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(final RecoveryTranslogOperationsRequest request, final TransportChannel channel) throws IOException { + public void messageReceived(final RecoveryTranslogOperationsRequest 
request, final TransportChannel channel, + Task task) throws IOException { try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) { final ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext()); @@ -463,7 +467,7 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde @Override public void onNewClusterState(ClusterState state) { try { - messageReceived(request, channel); + messageReceived(request, channel, task); } catch (Exception e) { onFailure(e); } @@ -537,7 +541,7 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde class FilesInfoRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(RecoveryFilesInfoRequest request, TransportChannel channel) throws Exception { + public void messageReceived(RecoveryFilesInfoRequest request, TransportChannel channel, Task task) throws Exception { try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() )) { recoveryRef.target().receiveFileInfo(request.phase1FileNames, request.phase1FileSizes, request.phase1ExistingFileNames, @@ -550,7 +554,7 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde class CleanFilesRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(RecoveryCleanFilesRequest request, TransportChannel channel) throws Exception { + public void messageReceived(RecoveryCleanFilesRequest request, TransportChannel channel, Task task) throws Exception { try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() )) { recoveryRef.target().cleanFiles(request.totalTranslogOps(), request.sourceMetaSnapshot()); @@ -565,7 +569,7 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde final AtomicLong bytesSinceLastPause = new 
AtomicLong(); @Override - public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel) throws Exception { + public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel, Task task) throws Exception { try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() )) { final RecoveryTarget recoveryTarget = recoveryRef.target(); diff --git a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index 29f6e7aeeec..f62618ec91e 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -49,6 +49,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; @@ -299,7 +300,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe private class ShardActiveRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(final ShardActiveRequest request, final TransportChannel channel) throws Exception { + public void messageReceived(final ShardActiveRequest request, final TransportChannel channel, Task task) throws Exception { IndexShard indexShard = getShard(request); // make sure shard is really there before register cluster state observer diff --git a/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java b/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java index ba3f9c048d0..380ae974080 100644 --- 
a/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoriesService.VerifyResponse; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.EmptyTransportResponseHandler; import org.elasticsearch.transport.TransportChannel; @@ -146,7 +147,7 @@ public class VerifyNodeRepositoryAction extends AbstractComponent { class VerifyNodeRepositoryRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(VerifyNodeRepositoryRequest request, TransportChannel channel) throws Exception { + public void messageReceived(VerifyNodeRepositoryRequest request, TransportChannel channel, Task task) throws Exception { DiscoveryNode localNode = clusterService.state().nodes().getLocalNode(); try { doVerify(request.repository, request.verificationToken, localNode); diff --git a/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java b/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java index 91b54ab8f20..4e09daf9ccf 100644 --- a/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java +++ b/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java @@ -59,7 +59,7 @@ public class RequestHandlerRegistry { public void processMessageReceived(Request request, TransportChannel channel) throws Exception { final Task task = taskManager.register(channel.getChannelType(), action, request); if (task == null) { - handler.messageReceived(request, channel); + handler.messageReceived(request, channel, null); } else { boolean success = false; try { diff --git 
a/server/src/main/java/org/elasticsearch/transport/TaskAwareTransportRequestHandler.java b/server/src/main/java/org/elasticsearch/transport/TaskAwareTransportRequestHandler.java deleted file mode 100644 index 12899d86d43..00000000000 --- a/server/src/main/java/org/elasticsearch/transport/TaskAwareTransportRequestHandler.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.transport; - -/** - * Transport request handlers that is using task context - */ -public abstract class TaskAwareTransportRequestHandler implements TransportRequestHandler { - @Override - public final void messageReceived(T request, TransportChannel channel) throws Exception { - throw new UnsupportedOperationException("the task parameter is required"); - } -} diff --git a/server/src/main/java/org/elasticsearch/transport/TransportActionProxy.java b/server/src/main/java/org/elasticsearch/transport/TransportActionProxy.java index 8c48f088743..a17509e8260 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportActionProxy.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportActionProxy.java @@ -22,6 +22,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; @@ -52,7 +53,7 @@ public final class TransportActionProxy { } @Override - public void messageReceived(T request, TransportChannel channel) throws Exception { + public void messageReceived(T request, TransportChannel channel, Task task) throws Exception { DiscoveryNode targetNode = request.targetNode; TransportRequest wrappedRequest = request.wrapped; service.sendRequest(targetNode, action, wrappedRequest, diff --git a/server/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java b/server/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java index 8c90b82fe7c..be957988068 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java @@ -23,12 +23,5 @@ import org.elasticsearch.tasks.Task; public interface TransportRequestHandler { - 
/** - * Override this method if access to the Task parameter is needed - */ - default void messageReceived(final T request, final TransportChannel channel, Task task) throws Exception { - messageReceived(request, channel); - } - - void messageReceived(T request, TransportChannel channel) throws Exception; + void messageReceived(T request, TransportChannel channel, Task task) throws Exception; } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 656d8c38417..8d3929cd661 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -231,7 +231,7 @@ public class TransportService extends AbstractLifecycleComponent { () -> HandshakeRequest.INSTANCE, ThreadPool.Names.SAME, false, false, - (request, channel) -> channel.sendResponse( + (request, channel, task) -> channel.sendResponse( new HandshakeResponse(localNode, clusterName, localNode.getVersion()))); if (connectToRemoteCluster) { // here we start to connect to the remote clusters diff --git a/server/src/test/java/org/elasticsearch/action/IndicesRequestIT.java b/server/src/test/java/org/elasticsearch/action/IndicesRequestIT.java index 8fac0b91cd6..40795bff730 100644 --- a/server/src/test/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/server/src/test/java/org/elasticsearch/action/IndicesRequestIT.java @@ -779,11 +779,6 @@ public class IndicesRequestIT extends ESIntegTestCase { } requestHandler.messageReceived(request, channel, task); } - - @Override - public void messageReceived(T request, TransportChannel channel) throws Exception { - messageReceived(request, channel, null); - } } } } diff --git a/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java 
b/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index 61beb59bc0c..fdc3d890363 100644 --- a/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -364,7 +364,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase { TestTransportChannel channel = new TestTransportChannel(); - handler.messageReceived(action.new NodeRequest(nodeId, new Request(), new ArrayList<>(shards)), channel); + handler.messageReceived(action.new NodeRequest(nodeId, new Request(), new ArrayList<>(shards)), channel, null); // check the operation was executed only on the expected shards assertEquals(shards, action.getResults().keySet()); diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java index c8030e1cf4a..2beaed1e106 100644 --- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.node.Node; +import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; @@ -469,7 +470,7 @@ public class TransportClientNodesServiceTests extends ESTestCase { } @Override - public void messageReceived(ClusterStateRequest request, TransportChannel channel) throws Exception { + public void messageReceived(ClusterStateRequest request, TransportChannel 
channel, Task task) throws Exception { if (block.get()) { release.await(); return; diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java index a60a23bcd6d..6dbf80d9be6 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java @@ -368,7 +368,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase { .routingTable(RoutingTable.builder().add(indexRoutingTable).build()); if (incompatible) { IllegalStateException ex = expectThrows(IllegalStateException.class, () -> - request.messageReceived(new MembershipAction.ValidateJoinRequest(stateBuilder.build()), null)); + request.messageReceived(new MembershipAction.ValidateJoinRequest(stateBuilder.build()), null, null)); assertEquals("index [test] version not supported: " + VersionUtils.getPreviousVersion(Version.CURRENT.minimumIndexCompatibilityVersion()) + " minimum compatible index version is: " + Version.CURRENT.minimumIndexCompatibilityVersion(), ex.getMessage()); @@ -400,7 +400,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase { public void sendResponse(Exception exception) throws IOException { } - }); + }, null); assertTrue(sendResponse.get()); } } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 637b8fb26a8..0369eda2a88 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -114,7 +114,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, null); try { 
newService.registerRequestHandler(ClusterSearchShardsAction.NAME,ThreadPool.Names.SAME, ClusterSearchShardsRequest::new, - (request, channel) -> { + (request, channel, task) -> { if ("index_not_found".equals(request.preference())) { channel.sendResponse(new IndexNotFoundException("index")); } else { @@ -123,7 +123,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { } }); newService.registerRequestHandler(ClusterStateAction.NAME, ThreadPool.Names.SAME, ClusterStateRequest::new, - (request, channel) -> { + (request, channel, task) -> { DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); for (DiscoveryNode node : knownNodes) { builder.add(node); diff --git a/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java b/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java index 3f4ae7bdd2d..491ba123a45 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.transport; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; @@ -26,6 +25,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; @@ -88,7 +88,7 @@ public class TransportActionProxyTests extends ESTestCase { public void testSendMessage() throws InterruptedException { serviceA.registerRequestHandler("/test", SimpleTestRequest::new, ThreadPool.Names.SAME, - (request, channel) -> { + (request, 
channel, task) -> { assertEquals(request.sourceNode, "TS_A"); SimpleTestResponse response = new SimpleTestResponse(); response.targetNode = "TS_A"; @@ -98,7 +98,7 @@ public class TransportActionProxyTests extends ESTestCase { serviceA.connectToNode(nodeB); serviceB.registerRequestHandler("/test", SimpleTestRequest::new, ThreadPool.Names.SAME, - (request, channel) -> { + (request, channel, task) -> { assertEquals(request.sourceNode, "TS_A"); SimpleTestResponse response = new SimpleTestResponse(); response.targetNode = "TS_B"; @@ -107,7 +107,7 @@ public class TransportActionProxyTests extends ESTestCase { TransportActionProxy.registerProxyAction(serviceB, "/test", SimpleTestResponse::new); serviceB.connectToNode(nodeC); serviceC.registerRequestHandler("/test", SimpleTestRequest::new, ThreadPool.Names.SAME, - (request, channel) -> { + (request, channel, task) -> { assertEquals(request.sourceNode, "TS_A"); SimpleTestResponse response = new SimpleTestResponse(); response.targetNode = "TS_C"; @@ -151,7 +151,7 @@ public class TransportActionProxyTests extends ESTestCase { public void testException() throws InterruptedException { serviceA.registerRequestHandler("/test", SimpleTestRequest::new, ThreadPool.Names.SAME, - (request, channel) -> { + (request, channel, task) -> { assertEquals(request.sourceNode, "TS_A"); SimpleTestResponse response = new SimpleTestResponse(); response.targetNode = "TS_A"; @@ -161,7 +161,7 @@ public class TransportActionProxyTests extends ESTestCase { serviceA.connectToNode(nodeB); serviceB.registerRequestHandler("/test", SimpleTestRequest::new, ThreadPool.Names.SAME, - (request, channel) -> { + (request, channel, task) -> { assertEquals(request.sourceNode, "TS_A"); SimpleTestResponse response = new SimpleTestResponse(); response.targetNode = "TS_B"; @@ -170,7 +170,7 @@ public class TransportActionProxyTests extends ESTestCase { TransportActionProxy.registerProxyAction(serviceB, "/test", SimpleTestResponse::new); serviceB.connectToNode(nodeC); 
serviceC.registerRequestHandler("/test", SimpleTestRequest::new, ThreadPool.Names.SAME, - (request, channel) -> { + (request, channel, task) -> { throw new ElasticsearchException("greetings from TS_C"); }); TransportActionProxy.registerProxyAction(serviceC, "/test", SimpleTestResponse::new); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 587c192beb2..0b676e14034 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -47,6 +47,7 @@ import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.mocksocket.MockServerSocket; import org.elasticsearch.node.Node; +import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.transport.MockTransportService; @@ -205,7 +206,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { public void testHelloWorld() { serviceA.registerRequestHandler("sayHello", StringMessageRequest::new, ThreadPool.Names.GENERIC, - (request, channel) -> { + (request, channel, task) -> { assertThat("moshe", equalTo(request.message)); try { channel.sendResponse(new StringMessageResponse("hello " + request.message)); @@ -280,7 +281,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { public void testThreadContext() throws ExecutionException, InterruptedException { - serviceA.registerRequestHandler("ping_pong", StringMessageRequest::new, ThreadPool.Names.GENERIC, (request, channel) -> { + serviceA.registerRequestHandler("ping_pong", StringMessageRequest::new, ThreadPool.Names.GENERIC, (request, channel, task) -> { 
assertEquals("ping_user", threadPool.getThreadContext().getHeader("test.ping.user")); assertNull(threadPool.getThreadContext().getTransient("my_private_context")); try { @@ -339,7 +340,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { serviceA.disconnectFromNode(nodeA); final AtomicReference exception = new AtomicReference<>(); serviceA.registerRequestHandler("localNode", StringMessageRequest::new, ThreadPool.Names.GENERIC, - (request, channel) -> { + (request, channel, task) -> { try { channel.sendResponse(new StringMessageResponse(request.message)); } catch (IOException e) { @@ -377,7 +378,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { } public void testAdapterSendReceiveCallbacks() throws Exception { - final TransportRequestHandler requestHandler = (request, channel) -> { + final TransportRequestHandler requestHandler = (request, channel, task) -> { try { if (randomBoolean()) { channel.sendResponse(TransportResponse.Empty.INSTANCE); @@ -485,7 +486,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { public void testVoidMessageCompressed() { serviceA.registerRequestHandler("sayHello", TransportRequest.Empty::new, ThreadPool.Names.GENERIC, - (request, channel) -> { + (request, channel, task) -> { try { TransportResponseOptions responseOptions = TransportResponseOptions.builder().withCompress(true).build(); channel.sendResponse(TransportResponse.Empty.INSTANCE, responseOptions); @@ -531,7 +532,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { serviceA.registerRequestHandler("sayHello", StringMessageRequest::new, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override - public void messageReceived(StringMessageRequest request, TransportChannel channel) { + public void messageReceived(StringMessageRequest request, TransportChannel channel, Task task) { assertThat("moshe", equalTo(request.message)); try { TransportResponseOptions 
responseOptions = TransportResponseOptions.builder().withCompress(true).build(); @@ -580,7 +581,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { serviceA.registerRequestHandler("sayHelloException", StringMessageRequest::new, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override - public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception { + public void messageReceived(StringMessageRequest request, TransportChannel channel, Task task) throws Exception { assertThat("moshe", equalTo(request.message)); throw new RuntimeException("bad message !!!"); } @@ -639,7 +640,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { Set sendingErrors = ConcurrentCollections.newConcurrentSet(); Set responseErrors = ConcurrentCollections.newConcurrentSet(); serviceA.registerRequestHandler("test", TestRequest::new, - randomBoolean() ? ThreadPool.Names.SAME : ThreadPool.Names.GENERIC, (request, channel) -> { + randomBoolean() ? 
ThreadPool.Names.SAME : ThreadPool.Names.GENERIC, (request, channel, task) -> { try { channel.sendResponse(new TestResponse()); } catch (Exception e) { @@ -647,7 +648,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { responseErrors.add(e); } }); - final TransportRequestHandler ignoringRequestHandler = (request, channel) -> { + final TransportRequestHandler ignoringRequestHandler = (request, channel, task) -> { try { channel.sendResponse(new TestResponse()); } catch (Exception e) { @@ -763,7 +764,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { final CountDownLatch latch2 = new CountDownLatch(1); try { serviceA.registerRequestHandler("foobar", StringMessageRequest::new, ThreadPool.Names.GENERIC, - (request, channel) -> { + (request, channel, task) -> { try { latch2.await(); logger.info("Stop ServiceB now"); @@ -791,7 +792,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { serviceA.registerRequestHandler("sayHelloTimeoutNoResponse", StringMessageRequest::new, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override - public void messageReceived(StringMessageRequest request, TransportChannel channel) { + public void messageReceived(StringMessageRequest request, TransportChannel channel, Task task) { assertThat("moshe", equalTo(request.message)); // don't send back a response } @@ -836,7 +837,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { serviceA.registerRequestHandler("sayHelloTimeoutDelayedResponse", StringMessageRequest::new, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override - public void messageReceived(StringMessageRequest request, TransportChannel channel) throws InterruptedException { + public void messageReceived(StringMessageRequest request, TransportChannel channel, Task task) throws InterruptedException { String message = request.message; inFlight.acquireUninterruptibly(); try { @@ -938,10 +939,10 @@ public 
abstract class AbstractSimpleTransportTestCase extends ESTestCase { } public void testTracerLog() throws InterruptedException { - TransportRequestHandler handler = (request, channel) -> channel.sendResponse(new StringMessageResponse("")); + TransportRequestHandler handler = (request, channel, task) -> channel.sendResponse(new StringMessageResponse("")); TransportRequestHandler handlerWithError = new TransportRequestHandler() { @Override - public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception { + public void messageReceived(StringMessageRequest request, TransportChannel channel, Task task) throws Exception { if (request.timeout() > 0) { Thread.sleep(request.timeout); } @@ -1257,7 +1258,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { serviceB.registerRequestHandler("/version", Version1Request::new, ThreadPool.Names.SAME, new TransportRequestHandler() { @Override - public void messageReceived(Version1Request request, TransportChannel channel) throws Exception { + public void messageReceived(Version1Request request, TransportChannel channel, Task task) throws Exception { assertThat(request.value1, equalTo(1)); assertThat(request.value2, equalTo(0)); // not set, coming from service A Version1Response response = new Version1Response(); @@ -1301,7 +1302,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { serviceA.registerRequestHandler("/version", Version0Request::new, ThreadPool.Names.SAME, new TransportRequestHandler() { @Override - public void messageReceived(Version0Request request, TransportChannel channel) throws Exception { + public void messageReceived(Version0Request request, TransportChannel channel, Task task) throws Exception { assertThat(request.value1, equalTo(1)); Version0Response response = new Version0Response(); response.value1 = 1; @@ -1344,7 +1345,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { public void 
testVersionFrom1to1() throws Exception { serviceB.registerRequestHandler("/version", Version1Request::new, ThreadPool.Names.SAME, - (request, channel) -> { + (request, channel, task) -> { assertThat(request.value1, equalTo(1)); assertThat(request.value2, equalTo(2)); Version1Response response = new Version1Response(); @@ -1388,7 +1389,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { public void testVersionFrom0to0() throws Exception { serviceA.registerRequestHandler("/version", Version0Request::new, ThreadPool.Names.SAME, - (request, channel) -> { + (request, channel, task) -> { assertThat(request.value1, equalTo(1)); Version0Response response = new Version0Response(); response.value1 = 1; @@ -1427,7 +1428,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { public void testMockFailToSendNoConnectRule() throws Exception { serviceA.registerRequestHandler("sayHello", StringMessageRequest::new, ThreadPool.Names.GENERIC, - (request, channel) -> { + (request, channel, task) -> { assertThat("moshe", equalTo(request.message)); throw new RuntimeException("bad message !!!"); }); @@ -1484,7 +1485,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { public void testMockUnresponsiveRule() throws IOException { serviceA.registerRequestHandler("sayHello", StringMessageRequest::new, ThreadPool.Names.GENERIC, - (request, channel) -> { + (request, channel, task) -> { assertThat("moshe", equalTo(request.message)); throw new RuntimeException("bad message !!!"); }); @@ -1540,7 +1541,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { final AtomicReference addressB = new AtomicReference<>(); serviceB.registerRequestHandler("action1", TestRequest::new, ThreadPool.Names.SAME, new TransportRequestHandler() { @Override - public void messageReceived(TestRequest request, TransportChannel channel) throws Exception { + public void messageReceived(TestRequest request, TransportChannel 
channel, Task task) throws Exception { addressA.set(request.remoteAddress()); channel.sendResponse(new TestResponse()); latch.countDown(); @@ -1582,7 +1583,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { Settings.EMPTY, false, false)) { AtomicBoolean requestProcessed = new AtomicBoolean(false); service.registerRequestHandler("action", TestRequest::new, ThreadPool.Names.SAME, - (request, channel) -> { + (request, channel, task) -> { requestProcessed.set(true); channel.sendResponse(TransportResponse.Empty.INSTANCE); }); @@ -1744,7 +1745,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { } @Override - public void messageReceived(TestRequest request, TransportChannel channel) throws Exception { + public void messageReceived(TestRequest request, TransportChannel channel, Task task) throws Exception { if (randomBoolean()) { Thread.sleep(randomIntBetween(10, 50)); } @@ -1868,18 +1869,18 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { public void testRegisterHandlerTwice() { serviceB.registerRequestHandler("action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC), - (request, message) -> { + (request, message, task) -> { throw new AssertionError("boom"); }); expectThrows(IllegalArgumentException.class, () -> serviceB.registerRequestHandler("action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC), - (request, message) -> { + (request, message, task) -> { throw new AssertionError("boom"); }) ); serviceA.registerRequestHandler("action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC), - (request, message) -> { + (request, message, task) -> { throw new AssertionError("boom"); }); } @@ -2066,7 +2067,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { List executors = new ArrayList<>(ThreadPool.THREAD_POOL_TYPES.keySet()); CollectionUtil.timSort(executors); // makes 
sure it's reproducible serviceA.registerRequestHandler("action", TestRequest::new, ThreadPool.Names.SAME, - (request, channel) -> { + (request, channel, task) -> { threadPool.getThreadContext().putTransient("boom", new Object()); threadPool.getThreadContext().addResponseHeader("foo.bar", "baz"); @@ -2127,7 +2128,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { CollectionUtil.timSort(executors); // makes sure it's reproducible TransportService serviceC = build(Settings.builder().put("name", "TS_TEST").build(), version0, null, true); serviceC.registerRequestHandler("action", TestRequest::new, ThreadPool.Names.SAME, - (request, channel) -> { + (request, channel, task) -> { // do nothing }); serviceC.start(); @@ -2187,7 +2188,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { CountDownLatch receivedLatch = new CountDownLatch(1); CountDownLatch sendResponseLatch = new CountDownLatch(1); serviceC.registerRequestHandler("action", TestRequest::new, ThreadPool.Names.SAME, - (request, channel) -> { + (request, channel, task) -> { // don't block on a network thread here threadPool.generic().execute(new AbstractRunnable() { @Override @@ -2255,7 +2256,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { CountDownLatch receivedLatch = new CountDownLatch(1); CountDownLatch sendResponseLatch = new CountDownLatch(1); serviceB.registerRequestHandler("action", TestRequest::new, ThreadPool.Names.SAME, - (request, channel) -> { + (request, channel, task) -> { // don't block on a network thread here threadPool.generic().execute(new AbstractRunnable() { @Override @@ -2368,7 +2369,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { Exception ex = new RuntimeException("boom"); ex.setStackTrace(new StackTraceElement[0]); serviceB.registerRequestHandler("action", TestRequest::new, ThreadPool.Names.SAME, - (request, channel) -> { + (request, channel, task) -> { // don't block on 
a network thread here threadPool.generic().execute(new AbstractRunnable() { @Override diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java index a9f3dc5a1b7..239be32033f 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java @@ -399,11 +399,6 @@ public class TransportRollupSearchAction extends TransportAction { - @Override - public final void messageReceived(SearchRequest request, TransportChannel channel) throws Exception { - throw new UnsupportedOperationException("the task parameter is required for this operation"); - } - @Override public final void messageReceived(final SearchRequest request, final TransportChannel channel, Task task) throws Exception { // We already got the task created on the network layer - no need to create it again on the transport layer diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java index 7de3e5d0980..55287d5d503 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java @@ -318,10 +318,5 @@ public class SecurityServerTransportInterceptor extends AbstractComponent implem } } } - - @Override - public void messageReceived(T request, TransportChannel channel) throws Exception { - throw new UnsupportedOperationException("task parameter is required for this operation"); - } } } From 
16e4e7a7cfb5196e02e6fc988f34553dc9d34acc Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Fri, 22 Jun 2018 17:15:29 +0200 Subject: [PATCH 6/8] Node selector per client rather than per request (#31471) We have made node selectors configurable per request, but none of the other language clients allow for that. A good reason not to do so is that having a different node selector per request breaks round-robin. This commit makes NodeSelector configurable only at client initialization. It also improves the docs on this matter, which is important given that a single node selector can still affect round-robin. --- .../elasticsearch/client/NodeSelector.java | 6 +-- .../elasticsearch/client/RequestOptions.java | 35 ++----------- .../org/elasticsearch/client/RestClient.java | 16 +++--- .../client/RestClientBuilder.java | 14 ++++- .../client/NodeSelectorTests.java | 2 +- .../client/RequestOptionsTests.java | 9 +--- .../RestClientMultipleHostsIntegTests.java | 46 ++++++++--------- .../client/RestClientMultipleHostsTests.java | 33 +++++------- .../client/RestClientSingleHostTests.java | 2 +- .../elasticsearch/client/RestClientTests.java | 4 +- .../RestClientDocumentation.java | 51 +++++++++++++++---- .../low-level/configuration.asciidoc | 27 ++++++++++ docs/java-rest/low-level/usage.asciidoc | 22 ++++---- .../smoketest/DocsClientYamlTestSuiteIT.java | 5 +- .../test/rest/ESRestTestCase.java | 12 +++-- .../rest/yaml/ClientYamlDocsTestClient.java | 11 ++-- .../test/rest/yaml/ClientYamlTestClient.java | 38 ++++++++++---- .../rest/yaml/ESClientYamlSuiteTestCase.java | 13 ++--- .../section/ClientYamlTestSectionTests.java | 4 +- .../smoketest/XDocsClientYamlTestSuiteIT.java | 6 +-- 20 files changed, 208 insertions(+), 148 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java b/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java index 5f5296fe16b..b3efa08befa 100644 ---
a/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java +++ b/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java @@ -24,7 +24,7 @@ import java.util.Iterator; /** * Selects nodes that can receive requests. Used to keep requests away * from master nodes or to send them to nodes with a particular attribute. - * Use with {@link RequestOptions.Builder#setNodeSelector(NodeSelector)}. + * Use with {@link RestClientBuilder#setNodeSelector(NodeSelector)}. */ public interface NodeSelector { /** @@ -68,7 +68,7 @@ public interface NodeSelector { * have the {@code master} role OR it has the data {@code data} * role. */ - NodeSelector NOT_MASTER_ONLY = new NodeSelector() { + NodeSelector SKIP_DEDICATED_MASTERS = new NodeSelector() { @Override public void select(Iterable nodes) { for (Iterator itr = nodes.iterator(); itr.hasNext();) { @@ -84,7 +84,7 @@ public interface NodeSelector { @Override public String toString() { - return "NOT_MASTER_ONLY"; + return "SKIP_DEDICATED_MASTERS"; } }; } diff --git a/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java b/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java index 97d150da3d3..cf6bd3d49f5 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java @@ -37,22 +37,18 @@ import java.util.ArrayList; */ public final class RequestOptions { public static final RequestOptions DEFAULT = new Builder( - Collections.
    emptyList(), NodeSelector.ANY, - HeapBufferedResponseConsumerFactory.DEFAULT).build(); + Collections.
    emptyList(), HeapBufferedResponseConsumerFactory.DEFAULT).build(); private final List
    headers; - private final NodeSelector nodeSelector; private final HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory; private RequestOptions(Builder builder) { this.headers = Collections.unmodifiableList(new ArrayList<>(builder.headers)); - this.nodeSelector = builder.nodeSelector; this.httpAsyncResponseConsumerFactory = builder.httpAsyncResponseConsumerFactory; } public Builder toBuilder() { - Builder builder = new Builder(headers, nodeSelector, httpAsyncResponseConsumerFactory); - return builder; + return new Builder(headers, httpAsyncResponseConsumerFactory); } /** @@ -62,14 +58,6 @@ public final class RequestOptions { return headers; } - /** - * The selector that chooses which nodes are valid destinations for - * {@link Request}s with these options. - */ - public NodeSelector getNodeSelector() { - return nodeSelector; - } - /** * The {@link HttpAsyncResponseConsumerFactory} used to create one * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the @@ -93,9 +81,6 @@ public final class RequestOptions { b.append(headers.get(h).toString()); } } - if (nodeSelector != NodeSelector.ANY) { - b.append(", nodeSelector=").append(nodeSelector); - } if (httpAsyncResponseConsumerFactory != HttpAsyncResponseConsumerFactory.DEFAULT) { b.append(", consumerFactory=").append(httpAsyncResponseConsumerFactory); } @@ -113,24 +98,20 @@ public final class RequestOptions { RequestOptions other = (RequestOptions) obj; return headers.equals(other.headers) - && nodeSelector.equals(other.nodeSelector) && httpAsyncResponseConsumerFactory.equals(other.httpAsyncResponseConsumerFactory); } @Override public int hashCode() { - return Objects.hash(headers, nodeSelector, httpAsyncResponseConsumerFactory); + return Objects.hash(headers, httpAsyncResponseConsumerFactory); } public static class Builder { private final List
    headers; - private NodeSelector nodeSelector; private HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory; - private Builder(List
    headers, NodeSelector nodeSelector, - HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) { + private Builder(List
    headers, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) { this.headers = new ArrayList<>(headers); - this.nodeSelector = nodeSelector; this.httpAsyncResponseConsumerFactory = httpAsyncResponseConsumerFactory; } @@ -150,14 +131,6 @@ public final class RequestOptions { this.headers.add(new ReqHeader(name, value)); } - /** - * Configure the selector that chooses which nodes are valid - * destinations for {@link Request}s with these options - */ - public void setNodeSelector(NodeSelector nodeSelector) { - this.nodeSelector = Objects.requireNonNull(nodeSelector, "nodeSelector cannot be null"); - } - /** * Set the {@link HttpAsyncResponseConsumerFactory} used to create one * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 82039cab5d0..77c11db455e 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -48,6 +48,7 @@ import org.apache.http.nio.protocol.HttpAsyncRequestProducer; import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; import org.elasticsearch.client.DeadHostState.TimeSupplier; +import javax.net.ssl.SSLHandshakeException; import java.io.Closeable; import java.io.IOException; import java.net.ConnectException; @@ -74,7 +75,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import javax.net.ssl.SSLHandshakeException; import static java.util.Collections.singletonList; @@ -108,15 +108,17 @@ public class RestClient implements Closeable { private final AtomicInteger lastNodeIndex = new AtomicInteger(0); private final ConcurrentMap blacklist = new ConcurrentHashMap<>(); private final FailureListener failureListener; + 
private final NodeSelector nodeSelector; private volatile NodeTuple> nodeTuple; RestClient(CloseableHttpAsyncClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders, - List nodes, String pathPrefix, FailureListener failureListener) { + List nodes, String pathPrefix, FailureListener failureListener, NodeSelector nodeSelector) { this.client = client; this.maxRetryTimeoutMillis = maxRetryTimeoutMillis; this.defaultHeaders = Collections.unmodifiableList(Arrays.asList(defaultHeaders)); this.failureListener = failureListener; this.pathPrefix = pathPrefix; + this.nodeSelector = nodeSelector; setNodes(nodes); } @@ -146,7 +148,7 @@ public class RestClient implements Closeable { /** * Replaces the hosts with which the client communicates. * - * @deprecated prefer {@link setNodes} because it allows you + * @deprecated prefer {@link #setNodes(Collection)} because it allows you * to set metadata for use with {@link NodeSelector}s */ @Deprecated @@ -180,8 +182,8 @@ public class RestClient implements Closeable { throw new IllegalArgumentException("hosts must not be null nor empty"); } List nodes = new ArrayList<>(hosts.length); - for (int i = 0; i < hosts.length; i++) { - nodes.add(new Node(hosts[i])); + for (HttpHost host : hosts) { + nodes.add(new Node(host)); } return nodes; } @@ -509,7 +511,7 @@ public class RestClient implements Closeable { setHeaders(httpRequest, request.getOptions().getHeaders()); FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(listener); long startTime = System.nanoTime(); - performRequestAsync(startTime, nextNode(request.getOptions().getNodeSelector()), httpRequest, ignoreErrorCodes, + performRequestAsync(startTime, nextNode(), httpRequest, ignoreErrorCodes, request.getOptions().getHttpAsyncResponseConsumerFactory(), failureTrackingResponseListener); } @@ -611,7 +613,7 @@ public class RestClient implements Closeable { * that is closest to being revived. 
* @throws IOException if no nodes are available */ - private NodeTuple> nextNode(NodeSelector nodeSelector) throws IOException { + private NodeTuple> nextNode() throws IOException { NodeTuple> nodeTuple = this.nodeTuple; List hosts = selectHosts(nodeTuple, blacklist, lastNodeIndex, nodeSelector); return new NodeTuple<>(hosts.iterator(), nodeTuple.authCache); diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java index 17d27248dfe..fb61f4f17c4 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java @@ -55,6 +55,7 @@ public final class RestClientBuilder { private HttpClientConfigCallback httpClientConfigCallback; private RequestConfigCallback requestConfigCallback; private String pathPrefix; + private NodeSelector nodeSelector = NodeSelector.ANY; /** * Creates a new builder instance and sets the hosts that the client will send requests to. @@ -173,6 +174,16 @@ public final class RestClientBuilder { return this; } + /** + * Sets the {@link NodeSelector} to be used for all requests. + * @throws NullPointerException if the provided nodeSelector is null + */ + public RestClientBuilder setNodeSelector(NodeSelector nodeSelector) { + Objects.requireNonNull(nodeSelector, "nodeSelector must not be null"); + this.nodeSelector = nodeSelector; + return this; + } + /** * Creates a new {@link RestClient} based on the provided configuration. 
*/ @@ -186,7 +197,8 @@ public final class RestClientBuilder { return createHttpClient(); } }); - RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, nodes, pathPrefix, failureListener); + RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, nodes, + pathPrefix, failureListener, nodeSelector); httpClient.start(); return restClient; } diff --git a/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java b/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java index 868ccdcab75..83027db325b 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java @@ -59,7 +59,7 @@ public class NodeSelectorTests extends RestClientTestCase { Collections.shuffle(nodes, getRandom()); List expected = new ArrayList<>(nodes); expected.remove(masterOnly); - NodeSelector.NOT_MASTER_ONLY.select(nodes); + NodeSelector.SKIP_DEDICATED_MASTERS.select(nodes); assertEquals(expected, nodes); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RequestOptionsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RequestOptionsTests.java index a78be6c126b..19106792228 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RequestOptionsTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RequestOptionsTests.java @@ -114,10 +114,6 @@ public class RequestOptionsTests extends RestClientTestCase { } } - if (randomBoolean()) { - builder.setNodeSelector(mock(NodeSelector.class)); - } - if (randomBoolean()) { builder.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(1)); } @@ -131,15 +127,12 @@ public class RequestOptionsTests extends RestClientTestCase { private static RequestOptions mutate(RequestOptions options) { RequestOptions.Builder mutant = options.toBuilder(); - int mutationType = between(0, 2); + int mutationType = 
between(0, 1); switch (mutationType) { case 0: mutant.addHeader("extra", "m"); return mutant.build(); case 1: - mutant.setNodeSelector(mock(NodeSelector.class)); - return mutant.build(); - case 2: mutant.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(5)); return mutant.build(); default: diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java index 7f5915fe352..272859e8441 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java @@ -75,14 +75,15 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { httpServers[i] = httpServer; httpHosts[i] = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); } - restClient = buildRestClient(); + restClient = buildRestClient(NodeSelector.ANY); } - private static RestClient buildRestClient() { + private static RestClient buildRestClient(NodeSelector nodeSelector) { RestClientBuilder restClientBuilder = RestClient.builder(httpHosts); if (pathPrefix.length() > 0) { restClientBuilder.setPathPrefix((randomBoolean() ? 
"/" : "") + pathPrefixWithoutLeadingSlash); } + restClientBuilder.setNodeSelector(nodeSelector); return restClientBuilder.build(); } @@ -199,29 +200,28 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { * test what happens after calling */ public void testNodeSelector() throws IOException { - Request request = new Request("GET", "/200"); - RequestOptions.Builder options = request.getOptions().toBuilder(); - options.setNodeSelector(firstPositionNodeSelector()); - request.setOptions(options); - int rounds = between(1, 10); - for (int i = 0; i < rounds; i++) { - /* - * Run the request more than once to verify that the - * NodeSelector overrides the round robin behavior. - */ - if (stoppedFirstHost) { - try { - restClient.performRequest(request); - fail("expected to fail to connect"); - } catch (ConnectException e) { - // Windows isn't consistent here. Sometimes the message is even null! - if (false == System.getProperty("os.name").startsWith("Windows")) { - assertEquals("Connection refused", e.getMessage()); + try (RestClient restClient = buildRestClient(firstPositionNodeSelector())) { + Request request = new Request("GET", "/200"); + int rounds = between(1, 10); + for (int i = 0; i < rounds; i++) { + /* + * Run the request more than once to verify that the + * NodeSelector overrides the round robin behavior. + */ + if (stoppedFirstHost) { + try { + restClient.performRequest(request); + fail("expected to fail to connect"); + } catch (ConnectException e) { + // Windows isn't consistent here. Sometimes the message is even null! 
+ if (false == System.getProperty("os.name").startsWith("Windows")) { + assertEquals("Connection refused", e.getMessage()); + } } + } else { + Response response = restClient.performRequest(request); + assertEquals(httpHosts[0], response.getHost()); } - } else { - Response response = restClient.performRequest(request); - assertEquals(httpHosts[0], response.getHost()); } } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java index d04b3cbb755..e1062076a0d 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java @@ -35,9 +35,7 @@ import org.apache.http.message.BasicHttpResponse; import org.apache.http.message.BasicStatusLine; import org.apache.http.nio.protocol.HttpAsyncRequestProducer; import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; -import org.elasticsearch.client.Node.Roles; import org.junit.After; -import org.junit.Before; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -74,13 +72,11 @@ import static org.mockito.Mockito.when; public class RestClientMultipleHostsTests extends RestClientTestCase { private ExecutorService exec = Executors.newFixedThreadPool(1); - private RestClient restClient; private List nodes; private HostsTrackingFailureListener failureListener; - @Before @SuppressWarnings("unchecked") - public void createRestClient() throws IOException { + public RestClient createRestClient(NodeSelector nodeSelector) { CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class); when(httpClient.execute(any(HttpAsyncRequestProducer.class), any(HttpAsyncResponseConsumer.class), any(HttpClientContext.class), any(FutureCallback.class))).thenAnswer(new Answer>() { @@ -119,7 +115,7 @@ public class RestClientMultipleHostsTests extends 
RestClientTestCase { } nodes = Collections.unmodifiableList(nodes); failureListener = new HostsTrackingFailureListener(); - restClient = new RestClient(httpClient, 10000, new Header[0], nodes, null, failureListener); + return new RestClient(httpClient, 10000, new Header[0], nodes, null, failureListener, nodeSelector); } /** @@ -131,12 +127,13 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { } public void testRoundRobinOkStatusCodes() throws IOException { + RestClient restClient = createRestClient(NodeSelector.ANY); int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5); for (int i = 0; i < numIters; i++) { Set hostsSet = hostsSet(); for (int j = 0; j < nodes.size(); j++) { int statusCode = randomOkStatusCode(getRandom()); - Response response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode); + Response response = restClient.performRequest(new Request(randomHttpMethod(getRandom()), "/" + statusCode)); assertEquals(statusCode, response.getStatusLine().getStatusCode()); assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost())); } @@ -146,6 +143,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { } public void testRoundRobinNoRetryErrors() throws IOException { + RestClient restClient = createRestClient(NodeSelector.ANY); int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5); for (int i = 0; i < numIters; i++) { Set hostsSet = hostsSet(); @@ -153,7 +151,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { String method = randomHttpMethod(getRandom()); int statusCode = randomErrorNoRetryStatusCode(getRandom()); try { - Response response = restClient.performRequest(method, "/" + statusCode); + Response response = restClient.performRequest(new Request(method, "/" + statusCode)); if (method.equals("HEAD") && statusCode == 404) { //no exception gets thrown although we got a 404 assertEquals(404, 
response.getStatusLine().getStatusCode()); @@ -178,9 +176,10 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { } public void testRoundRobinRetryErrors() throws IOException { + RestClient restClient = createRestClient(NodeSelector.ANY); String retryEndpoint = randomErrorRetryEndpoint(); try { - restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint); + restClient.performRequest(new Request(randomHttpMethod(getRandom()), retryEndpoint)); fail("request should have failed"); } catch (ResponseException e) { /* @@ -237,7 +236,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { for (int j = 0; j < nodes.size(); j++) { retryEndpoint = randomErrorRetryEndpoint(); try { - restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint); + restClient.performRequest(new Request(randomHttpMethod(getRandom()), retryEndpoint)); fail("request should have failed"); } catch (ResponseException e) { Response response = e.getResponse(); @@ -269,7 +268,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { int statusCode = randomErrorNoRetryStatusCode(getRandom()); Response response; try { - response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode); + response = restClient.performRequest(new Request(randomHttpMethod(getRandom()), "/" + statusCode)); } catch (ResponseException e) { response = e.getResponse(); } @@ -286,7 +285,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { for (int y = 0; y < i + 1; y++) { retryEndpoint = randomErrorRetryEndpoint(); try { - restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint); + restClient.performRequest(new Request(randomHttpMethod(getRandom()), retryEndpoint)); fail("request should have failed"); } catch (ResponseException e) { Response response = e.getResponse(); @@ -323,6 +322,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { assertTrue(found); } }; + 
RestClient restClient = createRestClient(firstPositionOnly); int rounds = between(1, 10); for (int i = 0; i < rounds; i++) { /* @@ -330,18 +330,16 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { * NodeSelector overrides the round robin behavior. */ Request request = new Request("GET", "/200"); - RequestOptions.Builder options = request.getOptions().toBuilder(); - options.setNodeSelector(firstPositionOnly); - request.setOptions(options); Response response = restClient.performRequest(request); assertEquals(nodes.get(0).getHost(), response.getHost()); } } public void testSetNodes() throws IOException { + RestClient restClient = createRestClient(NodeSelector.SKIP_DEDICATED_MASTERS); List newNodes = new ArrayList<>(nodes.size()); for (int i = 0; i < nodes.size(); i++) { - Roles roles = i == 0 ? new Roles(false, true, true) : new Roles(true, false, false); + Node.Roles roles = i == 0 ? new Node.Roles(false, true, true) : new Node.Roles(true, false, false); newNodes.add(new Node(nodes.get(i).getHost(), null, null, null, roles, null)); } restClient.setNodes(newNodes); @@ -352,9 +350,6 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { * NodeSelector overrides the round robin behavior. 
*/ Request request = new Request("GET", "/200"); - RequestOptions.Builder options = request.getOptions().toBuilder(); - options.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); - request.setOptions(options); Response response = restClient.performRequest(request); assertEquals(newNodes.get(0).getHost(), response.getHost()); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index 5987fe7dd98..6b7725666d4 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -150,7 +150,7 @@ public class RestClientSingleHostTests extends RestClientTestCase { node = new Node(new HttpHost("localhost", 9200)); failureListener = new HostsTrackingFailureListener(); restClient = new RestClient(httpClient, 10000, defaultHeaders, - singletonList(node), null, failureListener); + singletonList(node), null, failureListener, NodeSelector.ANY); } /** diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index 04742ccab4f..030c2fca627 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -54,7 +54,7 @@ public class RestClientTests extends RestClientTestCase { public void testCloseIsIdempotent() throws IOException { List nodes = singletonList(new Node(new HttpHost("localhost", 9200))); CloseableHttpAsyncClient closeableHttpAsyncClient = mock(CloseableHttpAsyncClient.class); - RestClient restClient = new RestClient(closeableHttpAsyncClient, 1_000, new Header[0], nodes, null, null); + RestClient restClient = new RestClient(closeableHttpAsyncClient, 1_000, new Header[0], nodes, null, null, null); restClient.close(); 
verify(closeableHttpAsyncClient, times(1)).close(); restClient.close(); @@ -475,7 +475,7 @@ public class RestClientTests extends RestClientTestCase { private static RestClient createRestClient() { List nodes = Collections.singletonList(new Node(new HttpHost("localhost", 9200))); return new RestClient(mock(CloseableHttpAsyncClient.class), randomLongBetween(1_000, 30_000), - new Header[] {}, nodes, null, null); + new Header[] {}, nodes, null, null, null); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java index d3a0202747d..d347353a1fb 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java @@ -36,7 +36,6 @@ import org.apache.http.nio.entity.NStringEntity; import org.apache.http.ssl.SSLContextBuilder; import org.apache.http.ssl.SSLContexts; import org.apache.http.util.EntityUtils; -import org.elasticsearch.client.HasAttributeNodeSelector; import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; import org.elasticsearch.client.Node; import org.elasticsearch.client.NodeSelector; @@ -54,6 +53,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.security.KeyStore; +import java.util.Iterator; import java.util.concurrent.CountDownLatch; /** @@ -82,8 +82,7 @@ public class RestClientDocumentation { static { RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); builder.addHeader("Authorization", "Bearer " + TOKEN); // <1> - builder.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); // <2> - builder.setHttpAsyncResponseConsumerFactory( // <3> + builder.setHttpAsyncResponseConsumerFactory( // <2> new HeapBufferedResponseConsumerFactory(30 * 1024 * 1024 * 1024)); COMMON_OPTIONS 
= builder.build(); } @@ -115,6 +114,45 @@ public class RestClientDocumentation { builder.setMaxRetryTimeoutMillis(10000); // <1> //end::rest-client-init-max-retry-timeout } + { + //tag::rest-client-init-node-selector + RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200, "http")); + builder.setNodeSelector(NodeSelector.SKIP_DEDICATED_MASTERS); // <1> + //end::rest-client-init-node-selector + } + { + //tag::rest-client-init-allocation-aware-selector + RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200, "http")); + builder.setNodeSelector(new NodeSelector() { // <1> + @Override + public void select(Iterable nodes) { + /* + * Prefer any node that belongs to rack_one. If none is around + * we will go to another rack till it's time to try and revive + * some of the nodes that belong to rack_one. + */ + boolean foundOne = false; + for (Node node : nodes) { + String rackId = node.getAttributes().get("rack_id").get(0); + if ("rack_one".equals(rackId)) { + foundOne = true; + break; + } + } + if (foundOne) { + Iterator nodesIt = nodes.iterator(); + while (nodesIt.hasNext()) { + Node node = nodesIt.next(); + String rackId = node.getAttributes().get("rack_id").get(0); + if ("rack_one".equals(rackId) == false) { + nodesIt.remove(); + } + } + } + } + }); + //end::rest-client-init-allocation-aware-selector + } { //tag::rest-client-init-failure-listener RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200, "http")); @@ -198,13 +236,6 @@ public class RestClientDocumentation { request.setOptions(options); //end::rest-client-options-customize-header } - { - //tag::rest-client-options-customize-attribute - RequestOptions.Builder options = COMMON_OPTIONS.toBuilder(); - options.setNodeSelector(new HasAttributeNodeSelector("rack", "c12")); // <1> - request.setOptions(options); - //end::rest-client-options-customize-attribute - } } { HttpEntity[] documents = new HttpEntity[10]; diff --git 
a/docs/java-rest/low-level/configuration.asciidoc b/docs/java-rest/low-level/configuration.asciidoc index b0753496558..0b58c82724b 100644 --- a/docs/java-rest/low-level/configuration.asciidoc +++ b/docs/java-rest/low-level/configuration.asciidoc @@ -99,3 +99,30 @@ http://docs.oracle.com/javase/8/docs/technotes/guides/net/properties.html[`netwo to your http://docs.oracle.com/javase/8/docs/technotes/guides/security/PolicyFiles.html[Java security policy]. + +=== Node selector + +The client sends each request to one of the configured nodes in round-robin +fashion. Nodes can optionally be filtered through a node selector that needs +to be provided when initializing the client. This is useful when sniffing is +enabled, in case only dedicated master nodes should be hit by HTTP requests. +For each request the client will run the node selector, if one is configured, +to filter the node candidates, then select the next one in the list out of the +remaining ones. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-init-allocation-aware-selector] +-------------------------------------------------- +<1> Set an allocation aware node selector that allows picking a node in the +local rack if any is available, otherwise go to any other node in any rack. It +acts as a preference rather than a strict requirement, given that it goes to +another rack if none of the local nodes are available, rather than returning +no nodes in such a case, which would make the client forcibly revive a local node +whenever none of the nodes from the preferred rack is available. + +WARNING: Node selectors that do not consistently select the same set of nodes +will make round-robin behaviour unpredictable and possibly unfair. The +preference example above is fine as it reasons about availability of nodes +which already affects the predictability of round-robin. 
Node selection should +not depend on other external factors or round-robin will not work properly. diff --git a/docs/java-rest/low-level/usage.asciidoc b/docs/java-rest/low-level/usage.asciidoc index 1f8b302715f..71fadd98988 100644 --- a/docs/java-rest/low-level/usage.asciidoc +++ b/docs/java-rest/low-level/usage.asciidoc @@ -196,6 +196,16 @@ include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-init-failur <1> Set a listener that gets notified every time a node fails, in case actions need to be taken. Used internally when sniffing on failure is enabled. +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-init-node-selector] +-------------------------------------------------- +<1> Set the node selector to be used to filter the nodes the client will send +requests to among the ones that are set to the client itself. This is useful +for instance to prevent sending requests to dedicated master nodes when +sniffing is enabled. By default the client sends requests to every configured +node. + ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-init-request-config-callback] @@ -283,8 +293,7 @@ instance and share it between all requests: include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-singleton] -------------------------------------------------- <1> Add any headers needed by all requests. -<2> Set a `NodeSelector`. -<3> Customize the response consumer. +<2> Customize the response consumer. `addHeader` is for headers that are required for authorization or to work with a proxy in front of Elasticsearch. 
There is no need to set the `Content-Type` @@ -315,15 +324,6 @@ adds an extra header: include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-customize-header] -------------------------------------------------- -Or you can send requests to nodes with a particular attribute: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-customize-attribute] --------------------------------------------------- -<1> Replace the node selector with one that selects nodes on a particular rack. - - ==== Multiple parallel asynchronous actions The client is quite happy to execute many actions in parallel. The following diff --git a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java index 02bc304317e..a8dd91e8b6d 100644 --- a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java +++ b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java @@ -91,8 +91,9 @@ public class DocsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { final RestClient restClient, final List hosts, final Version esVersion, - final Version masterVersion) throws IOException { - return new ClientYamlDocsTestClient(restSpec, restClient, hosts, esVersion, masterVersion); + final Version masterVersion) { + return new ClientYamlDocsTestClient(restSpec, restClient, hosts, esVersion, masterVersion, + restClientBuilder -> configureClient(restClientBuilder, restClientSettings())); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index df92b101bf1..672d19d5dc2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ 
b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -30,7 +30,6 @@ import org.apache.http.entity.StringEntity; import org.apache.http.message.BasicHeader; import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; import org.apache.http.ssl.SSLContexts; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -47,6 +46,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.junit.After; @@ -381,6 +381,11 @@ public abstract class ESRestTestCase extends ESTestCase { protected RestClient buildClient(Settings settings, HttpHost[] hosts) throws IOException { RestClientBuilder builder = RestClient.builder(hosts); + configureClient(builder, settings); + return builder.build(); + } + + protected static void configureClient(RestClientBuilder builder, Settings settings) throws IOException { String keystorePath = settings.get(TRUSTSTORE_PATH); if (keystorePath != null) { final String keystorePass = settings.get(TRUSTSTORE_PASSWORD); @@ -399,11 +404,10 @@ public abstract class ESRestTestCase extends ESTestCase { SSLContext sslcontext = SSLContexts.custom().loadTrustMaterial(keyStore, null).build(); SSLIOSessionStrategy sessionStrategy = new SSLIOSessionStrategy(sslcontext); builder.setHttpClientConfigCallback(httpClientBuilder -> httpClientBuilder.setSSLStrategy(sessionStrategy)); - } catch (KeyStoreException|NoSuchAlgorithmException|KeyManagementException|CertificateException e) { + } catch (KeyStoreException |NoSuchAlgorithmException 
|KeyManagementException |CertificateException e) { throw new RuntimeException("Error setting up ssl", e); } } - try (ThreadContext threadContext = new ThreadContext(settings)) { Header[] defaultHeaders = new Header[threadContext.getHeaders().size()]; int i = 0; @@ -412,7 +416,6 @@ public abstract class ESRestTestCase extends ESTestCase { } builder.setDefaultHeaders(defaultHeaders); } - final String requestTimeoutString = settings.get(CLIENT_RETRY_TIMEOUT); if (requestTimeoutString != null) { final TimeValue maxRetryTimeout = TimeValue.parseTimeValue(requestTimeoutString, CLIENT_RETRY_TIMEOUT); @@ -423,7 +426,6 @@ public abstract class ESRestTestCase extends ESTestCase { final TimeValue socketTimeout = TimeValue.parseTimeValue(socketTimeoutString, CLIENT_SOCKET_TIMEOUT); builder.setRequestConfigCallback(conf -> conf.setSocketTimeout(Math.toIntExact(socketTimeout.getMillis()))); } - return builder.build(); } @SuppressWarnings("unchecked") diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java index 33443aa5b6e..ddd58376635 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java @@ -27,6 +27,8 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestClientBuilder; +import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; import java.io.IOException; @@ -47,8 +49,9 @@ public final class ClientYamlDocsTestClient extends ClientYamlTestClient { final RestClient restClient, final List hosts, final Version esVersion, - final Version masterVersion) throws IOException 
{ - super(restSpec, restClient, hosts, esVersion, masterVersion); + final Version masterVersion, + final CheckedConsumer clientBuilderConsumer) { + super(restSpec, restClient, hosts, esVersion, masterVersion, clientBuilderConsumer); } @Override @@ -66,9 +69,9 @@ public final class ClientYamlDocsTestClient extends ClientYamlTestClient { request.addParameter(param.getKey(), param.getValue()); } request.setEntity(entity); - setOptions(request, headers, nodeSelector); + setOptions(request, headers); try { - Response response = restClient.performRequest(request); + Response response = getRestClient(nodeSelector).performRequest(request); return new ClientYamlTestResponse(response); } catch (ResponseException e) { throw new ClientYamlTestResponseException(e); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java index 99da6614028..fdc10a1a246 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java @@ -26,18 +26,22 @@ import org.apache.http.entity.ContentType; import org.apache.http.util.EntityUtils; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; +import org.elasticsearch.client.Node; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestClientBuilder; +import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestPath; import 
org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; import java.io.IOException; +import java.io.UncheckedIOException; import java.net.URI; import java.net.URISyntaxException; import java.util.HashMap; @@ -58,21 +62,24 @@ public class ClientYamlTestClient { private static final ContentType YAML_CONTENT_TYPE = ContentType.create("application/yaml"); private final ClientYamlSuiteRestSpec restSpec; - protected final RestClient restClient; + protected final Map restClients = new HashMap<>(); private final Version esVersion; private final Version masterVersion; + private final CheckedConsumer clientBuilderConsumer; public ClientYamlTestClient( final ClientYamlSuiteRestSpec restSpec, final RestClient restClient, final List hosts, final Version esVersion, - final Version masterVersion) throws IOException { + final Version masterVersion, + final CheckedConsumer clientBuilderConsumer) { assert hosts.size() > 0; this.restSpec = restSpec; - this.restClient = restClient; + this.restClients.put(NodeSelector.ANY, restClient); this.esVersion = esVersion; this.masterVersion = masterVersion; + this.clientBuilderConsumer = clientBuilderConsumer; } public Version getEsVersion() { @@ -172,30 +179,43 @@ public class ClientYamlTestClient { requestPath = finalPath.toString(); } - - logger.debug("calling api [{}]", apiName); Request request = new Request(requestMethod, requestPath); for (Map.Entry param : queryStringParams.entrySet()) { request.addParameter(param.getKey(), param.getValue()); } request.setEntity(entity); - setOptions(request, headers, nodeSelector); + setOptions(request, headers); + try { - Response response = restClient.performRequest(request); + Response response = getRestClient(nodeSelector).performRequest(request); return new ClientYamlTestResponse(response); } catch(ResponseException e) { throw new ClientYamlTestResponseException(e); } } - protected static void setOptions(Request request, Map headers, NodeSelector nodeSelector) { + protected RestClient 
getRestClient(NodeSelector nodeSelector) { + //lazily build a new client in case we need to point to some specific node + return restClients.computeIfAbsent(nodeSelector, selector -> { + RestClient anyClient = restClients.get(NodeSelector.ANY); + RestClientBuilder builder = RestClient.builder(anyClient.getNodes().toArray(new Node[0])); + try { + clientBuilderConsumer.accept(builder); + } catch(IOException e) { + throw new UncheckedIOException(e); + } + builder.setNodeSelector(nodeSelector); + return builder.build(); + }); + } + + protected static void setOptions(Request request, Map headers) { RequestOptions.Builder options = request.getOptions().toBuilder(); for (Map.Entry header : headers.entrySet()) { logger.debug("Adding header {} with value {}", header.getKey(), header.getValue()); options.addHeader(header.getKey(), header.getValue()); } - options.setNodeSelector(nodeSelector); request.setOptions(options); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index c0b5b1e9588..6afc123520b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -47,6 +47,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -122,7 +123,7 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { public void initAndResetContext() throws Exception { if (restTestExecutionContext == null) { // Sniff host metadata in case we need it in the yaml tests - List nodesWithMetadata = sniffHostMetadata(adminClient()); + List nodesWithMetadata = sniffHostMetadata(); client().setNodes(nodesWithMetadata); 
adminClient().setNodes(nodesWithMetadata); @@ -163,8 +164,9 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { final RestClient restClient, final List hosts, final Version esVersion, - final Version masterVersion) throws IOException { - return new ClientYamlTestClient(restSpec, restClient, hosts, esVersion, masterVersion); + final Version masterVersion) { + return new ClientYamlTestClient(restSpec, restClient, hosts, esVersion, masterVersion, + restClientBuilder -> configureClient(restClientBuilder, restClientSettings())); } /** @@ -195,8 +197,7 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { } //sort the candidates so they will always be in the same order before being shuffled, for repeatability - Collections.sort(tests, - (o1, o2) -> ((ClientYamlTestCandidate)o1[0]).getTestPath().compareTo(((ClientYamlTestCandidate)o2[0]).getTestPath())); + tests.sort(Comparator.comparing(o -> ((ClientYamlTestCandidate) o[0]).getTestPath())); return tests; } @@ -401,7 +402,7 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { /** * Sniff the cluster for host metadata. 
*/ - private List sniffHostMetadata(RestClient client) throws IOException { + private List sniffHostMetadata() throws IOException { ElasticsearchNodesSniffer.Scheme scheme = ElasticsearchNodesSniffer.Scheme.valueOf(getProtocol().toUpperCase(Locale.ROOT)); ElasticsearchNodesSniffer sniffer = new ElasticsearchNodesSniffer( diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java index 87f2d7f9a53..5da8601a9f3 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java @@ -73,7 +73,7 @@ public class ClientYamlTestSectionTests extends AbstractClientYamlTestFragmentPa section.setSkipSection(new SkipSection(null, singletonList("node_selector"), null)); DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); ApiCallSection apiCall = new ApiCallSection("test"); - apiCall.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); + apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_MASTERS); doSection.setApiCallSection(apiCall); section.addExecutableSection(doSection); } @@ -84,7 +84,7 @@ public class ClientYamlTestSectionTests extends AbstractClientYamlTestFragmentPa section.setSkipSection(new SkipSection(null, singletonList("yaml"), null)); DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); ApiCallSection apiCall = new ApiCallSection("test"); - apiCall.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); + apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_MASTERS); doSection.setApiCallSection(apiCall); Exception e = expectThrows(IllegalArgumentException.class, () -> section.addExecutableSection(doSection)); assertEquals("Attempted to add a [do] with a [node_selector] section without a corresponding" diff --git 
a/x-pack/docs/src/test/java/org/elasticsearch/smoketest/XDocsClientYamlTestSuiteIT.java b/x-pack/docs/src/test/java/org/elasticsearch/smoketest/XDocsClientYamlTestSuiteIT.java index af9fb45b8a0..0196406c478 100644 --- a/x-pack/docs/src/test/java/org/elasticsearch/smoketest/XDocsClientYamlTestSuiteIT.java +++ b/x-pack/docs/src/test/java/org/elasticsearch/smoketest/XDocsClientYamlTestSuiteIT.java @@ -20,7 +20,6 @@ import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; import org.elasticsearch.xpack.test.rest.XPackRestIT; import org.junit.After; -import java.io.IOException; import java.util.List; import java.util.Map; @@ -58,8 +57,9 @@ public class XDocsClientYamlTestSuiteIT extends XPackRestIT { final RestClient restClient, final List hosts, final Version esVersion, - final Version masterVersion) throws IOException { - return new ClientYamlDocsTestClient(restSpec, restClient, hosts, esVersion, masterVersion); + final Version masterVersion) { + return new ClientYamlDocsTestClient(restSpec, restClient, hosts, esVersion, masterVersion, + restClientBuilder -> configureClient(restClientBuilder, restClientSettings())); } /** From 3c42bfad4e68c464ee57420f39e390efd2888761 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 22 Jun 2018 17:24:27 +0200 Subject: [PATCH 7/8] Fix Mockito trying to mock IOException that isn't thrown by method (#31433) (#31527) --- .../xpack/monitoring/exporter/http/HttpExporterTests.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java index ff83621119e..a96dc8ebb12 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java +++ 
b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java @@ -460,7 +460,6 @@ public class HttpExporterTests extends ESTestCase { } } - @AwaitsFix (bugUrl = "https://github.com/elastic/elasticsearch/issues/31433" ) public void testHttpExporterShutdown() throws Exception { final Config config = createConfig(Settings.EMPTY); final RestClient client = mock(RestClient.class); @@ -469,7 +468,7 @@ public class HttpExporterTests extends ESTestCase { final MultiHttpResource resource = mock(MultiHttpResource.class); if (sniffer != null && rarely()) { - doThrow(randomFrom(new IOException("expected"), new RuntimeException("expected"))).when(sniffer).close(); + doThrow(new RuntimeException("expected")).when(sniffer).close(); } if (rarely()) { From 7313a987f4a8ba1e39d6105f7d74be9186faa95b Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Fri, 22 Jun 2018 17:44:13 +0200 Subject: [PATCH 8/8] fix repository update with the same settings but different type (#31458) fix repository update with the same settings but different type --- .../repositories/RepositoriesService.java | 2 +- .../repositories/RepositoriesServiceIT.java | 96 +++++++++++++++++++ 2 files changed, 97 insertions(+), 1 deletion(-) create mode 100644 server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceIT.java diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 636e108468e..d5b2a6413e9 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -349,7 +349,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta Repository previous = repositories.get(repositoryMetaData.name()); if (previous != null) { RepositoryMetaData previousMetadata = previous.getMetadata(); - if 
(!previousMetadata.type().equals(repositoryMetaData.type()) && previousMetadata.settings().equals(repositoryMetaData.settings())) { + if (previousMetadata.equals(repositoryMetaData)) { // Previous version is the same as this one - ignore it return false; } diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceIT.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceIT.java new file mode 100644 index 00000000000..05c9746aa49 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceIT.java @@ -0,0 +1,96 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.repositories; + +import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.snapshots.mockstore.MockRepository; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; + +import java.util.Collection; +import java.util.Collections; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.sameInstance; + +public class RepositoriesServiceIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Collections.singletonList(MockRepository.Plugin.class); + } + + public void testUpdateRepository() { + final InternalTestCluster cluster = internalCluster(); + + final String repositoryName = "test-repo"; + + final Client client = client(); + final RepositoriesService repositoriesService = + cluster.getDataOrMasterNodeInstances(RepositoriesService.class).iterator().next(); + final Settings settings = cluster.getDefaultSettings(); + + final Settings.Builder repoSettings = Settings.builder().put("location", randomRepoPath()); + + assertAcked(client.admin().cluster().preparePutRepository(repositoryName) + .setType(FsRepository.TYPE) + .setSettings(repoSettings) + .get()); + + final GetRepositoriesResponse originalGetRepositoriesResponse = + client.admin().cluster().prepareGetRepositories(repositoryName).get(); + + assertThat(originalGetRepositoriesResponse.repositories(), hasSize(1)); + RepositoryMetaData 
originalRepositoryMetaData = originalGetRepositoriesResponse.repositories().get(0); + + assertThat(originalRepositoryMetaData.type(), equalTo(FsRepository.TYPE)); + + final Repository originalRepository = repositoriesService.repository(repositoryName); + assertThat(originalRepository, instanceOf(FsRepository.class)); + + final boolean updated = randomBoolean(); + final String updatedRepositoryType = updated ? "mock" : FsRepository.TYPE; + + assertAcked(client.admin().cluster().preparePutRepository(repositoryName) + .setType(updatedRepositoryType) + .setSettings(repoSettings) + .get()); + + final GetRepositoriesResponse updatedGetRepositoriesResponse = + client.admin().cluster().prepareGetRepositories(repositoryName).get(); + + assertThat(updatedGetRepositoriesResponse.repositories(), hasSize(1)); + final RepositoryMetaData updatedRepositoryMetaData = updatedGetRepositoriesResponse.repositories().get(0); + + assertThat(updatedRepositoryMetaData.type(), equalTo(updatedRepositoryType)); + + final Repository updatedRepository = repositoriesService.repository(repositoryName); + assertThat(updatedRepository, updated ? not(sameInstance(originalRepository)) : sameInstance(originalRepository)); + } +}