From bed7e68014f85507a941d28d1bae64e1190bcfe7 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Thu, 27 Jun 2019 08:27:01 +0300 Subject: [PATCH 01/42] Make the ignore_above docs tests more robust. (#43349) It is possible for internal ML indices like `.data-frame-notifications-1` to leak, causing other docs tests to fail when they accidentally search over these indices. This PR updates the ignore_above tests to only search a specific index. --- docs/reference/mapping/params/ignore-above.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/mapping/params/ignore-above.asciidoc b/docs/reference/mapping/params/ignore-above.asciidoc index daf5c92bcf3..33c0eaf339f 100644 --- a/docs/reference/mapping/params/ignore-above.asciidoc +++ b/docs/reference/mapping/params/ignore-above.asciidoc @@ -30,7 +30,7 @@ PUT my_index/_doc/2 <3> "message": "Syntax error with some long stacktrace" } -GET _search <4> +GET my_index/_search <4> { "aggs": { "messages": { From 05a7333eca2f3f7060c68e636f0296acc5be3543 Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Thu, 27 Jun 2019 08:56:26 +0100 Subject: [PATCH 02/42] Require [articles] setting in elision filter (#43083) We should throw an exception at construction time if a list of articles is not provided, otherwise we can get random NPEs during indexing. Relates to #43002 --- .../tokenfilters/elision-tokenfilter.asciidoc | 5 ++- .../analysis/common/CommonAnalysisPlugin.java | 2 +- .../common/ElisionTokenFilterFactory.java | 3 ++ .../common/ElisionFilterFactoryTests.java | 43 +++++++++++++++++++ .../test/analysis-common/40_token_filters.yml | 14 ++++++ 5 files changed, 64 insertions(+), 3 deletions(-) create mode 100644 modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ElisionFilterFactoryTests.java diff --git a/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc index 2ff19cebe89..34646a0413e 100644 --- a/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc @@ -4,8 +4,9 @@ A token filter which removes elisions. For example, "l'avion" (the plane) will tokenized as "avion" (plane). -Accepts `articles` parameter which is a set of stop words articles. Also accepts -`articles_case`, which indicates whether the filter treats those articles as +Requires either an `articles` parameter which is a set of stop word articles, or +`articles_path` which points to a text file containing the stop set. Also optionally +accepts `articles_case`, which indicates whether the filter treats those articles as case sensitive. 
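Concretely, a definition that satisfies the new requirement can declare the articles inline; a minimal sketch in the style of the new unit test below (the filter name `my_elision` and the abbreviated French article list are illustrative assumptions, not taken from this patch):

    // Sketch: an elision filter must now declare its stop set explicitly,
    // either inline via "articles" or from a file via "articles_path".
    Settings settings = Settings.builder()
        .put("index.analysis.filter.my_elision.type", "elision")
        .putList("index.analysis.filter.my_elision.articles", "l", "m", "t", "qu", "n", "s", "j")
        .build();

Omitting both settings now fails at index creation with an illegal_argument_exception rather than with a random NPE at indexing time.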
For example: diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index c2886408437..b438ca52e05 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -239,7 +239,7 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri filters.put("dutch_stem", DutchStemTokenFilterFactory::new); filters.put("edge_ngram", EdgeNGramTokenFilterFactory::new); filters.put("edgeNGram", EdgeNGramTokenFilterFactory::new); - filters.put("elision", ElisionTokenFilterFactory::new); + filters.put("elision", requiresAnalysisSettings(ElisionTokenFilterFactory::new)); filters.put("fingerprint", FingerprintTokenFilterFactory::new); filters.put("flatten_graph", FlattenGraphTokenFilterFactory::new); filters.put("french_stem", FrenchStemTokenFilterFactory::new); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ElisionTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ElisionTokenFilterFactory.java index 52cb69952b8..39d042caa8c 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ElisionTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ElisionTokenFilterFactory.java @@ -36,6 +36,9 @@ public class ElisionTokenFilterFactory extends AbstractTokenFilterFactory implem ElisionTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); this.articles = Analysis.parseArticles(env, settings); + if (this.articles == null) { + throw new IllegalArgumentException("elision filter requires [articles] or [articles_path] setting"); + } } @Override diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ElisionFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ElisionFilterFactoryTests.java new file mode 100644 index 00000000000..dbfd49d5649 --- /dev/null +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ElisionFilterFactoryTests.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.analysis.common; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.analysis.AnalysisTestsHelper; +import org.elasticsearch.test.ESTokenStreamTestCase; + +import java.io.IOException; + +public class ElisionFilterFactoryTests extends ESTokenStreamTestCase { + + public void testElisionFilterWithNoArticles() throws IOException { + Settings settings = Settings.builder() + .put("index.analysis.filter.elision.type", "elision") + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin())); + + assertEquals("elision filter requires [articles] or [articles_path] setting", e.getMessage()); + } + +} diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml index d6fe6b9a980..63658d486a1 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml @@ -587,6 +587,20 @@ - length: { tokens: 1 } - match: { tokens.0.token: avion } + - do: + catch: bad_request + indices.create: + index: test2 + body: + settings: + analysis: + filter: + my_elision: + type: elision + - match: { status: 400 } + - match: { error.type: illegal_argument_exception } + - match: { error.reason: "elision filter requires [articles] or [articles_path] setting" } + --- "stemmer": - do: From 8ff5519b11df4768aafa199a9f5ff370584be444 Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Thu, 27 Jun 2019 09:01:53 +0100 Subject: [PATCH 03/42] Use preconfigured filters correctly in Analyze API (#43568) When a named token filter or char filter is passed as part of an Analyze API request with no index, we currently try and build the relevant filter using no index settings. However, this can miss cases where there is a pre-configured filter defined in the analysis registry. One example here is the elision filter, which has a pre-configured version built with the french elision set; when used as part of normal analysis, this preconfigured set is used, but when used as part of the Analyze API we end up with NPEs because it tries to instantiate the filter with no index settings. This commit changes the Analyze API to check for pre-configured filters in the case that the request has no index defined, and is using a name rather than a custom definition for a filter. 
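To illustrate the fixed path, a request of the following shape (sketched in the style of the transport tests changed below; `elision` resolves to the pre-configured French variant mentioned above) previously failed with an NPE and now analyzes successfully:

    // Sketch: an Analyze request with no index that names a filter rather than
    // defining one. The pre-configured "elision" filter is now found first,
    // instead of attempting to build the filter with no index settings.
    AnalyzeAction.Request request = new AnalyzeAction.Request();
    request.text("l'avion");
    request.tokenizer("standard");
    request.addTokenFilter("elision");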
It also changes the pre-configured `word_delimiter_graph` filter and `edge_ngram` tokenizer to make their settings consistent with the defaults used when creating them with no settings Closes #43002 Closes #43621 Closes #43582 --- .../analysis/common/CommonAnalysisPlugin.java | 26 +++-- .../common/EdgeNGramTokenizerTests.java | 98 +++++++++++++++++++ ...DelimiterGraphTokenFilterFactoryTests.java | 57 +++++++++++ .../index/analysis/AnalysisRegistry.java | 20 ++-- .../indices/TransportAnalyzeActionTests.java | 61 ++++++------ 5 files changed, 219 insertions(+), 43 deletions(-) create mode 100644 modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index b438ca52e05..f5eb6540a75 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -83,6 +83,7 @@ import org.apache.lucene.analysis.miscellaneous.TrimFilter; import org.apache.lucene.analysis.miscellaneous.TruncateTokenFilter; import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter; import org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter; +import org.apache.lucene.analysis.miscellaneous.WordDelimiterIterator; import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter; import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer; import org.apache.lucene.analysis.ngram.NGramTokenFilter; @@ -110,6 +111,7 @@ import org.apache.lucene.analysis.tr.ApostropheFilter; import org.apache.lucene.analysis.tr.TurkishAnalyzer; import org.apache.lucene.analysis.util.ElisionFilter; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -488,13 +490,15 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri | WordDelimiterFilter.SPLIT_ON_CASE_CHANGE | WordDelimiterFilter.SPLIT_ON_NUMERICS | WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null))); - filters.add(PreConfiguredTokenFilter.singleton("word_delimiter_graph", false, input -> - new WordDelimiterGraphFilter(input, + filters.add(PreConfiguredTokenFilter.singletonWithVersion("word_delimiter_graph", false, (input, version) -> { + boolean adjustOffsets = version.onOrAfter(Version.V_7_3_0); + return new WordDelimiterGraphFilter(input, adjustOffsets, WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, WordDelimiterGraphFilter.GENERATE_WORD_PARTS | WordDelimiterGraphFilter.GENERATE_NUMBER_PARTS | WordDelimiterGraphFilter.SPLIT_ON_CASE_CHANGE | WordDelimiterGraphFilter.SPLIT_ON_NUMERICS - | WordDelimiterGraphFilter.STEM_ENGLISH_POSSESSIVE, null))); + | WordDelimiterGraphFilter.STEM_ENGLISH_POSSESSIVE, null); + })); return filters; } @@ -508,8 +512,12 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri tokenizers.add(PreConfiguredTokenizer.singleton("letter", LetterTokenizer::new)); tokenizers.add(PreConfiguredTokenizer.singleton("whitespace", WhitespaceTokenizer::new)); tokenizers.add(PreConfiguredTokenizer.singleton("ngram", NGramTokenizer::new)); - tokenizers.add(PreConfiguredTokenizer.singleton("edge_ngram", - () -> new 
EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE))); + tokenizers.add(PreConfiguredTokenizer.elasticsearchVersion("edge_ngram", (version) -> { + if (version.onOrAfter(Version.V_7_3_0)) { + return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); + } + return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE); + })); tokenizers.add(PreConfiguredTokenizer.singleton("pattern", () -> new PatternTokenizer(Regex.compile("\\W+", null), -1))); tokenizers.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new)); // TODO deprecate and remove in API @@ -518,8 +526,12 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri // Temporary shim for aliases. TODO deprecate after they are moved tokenizers.add(PreConfiguredTokenizer.singleton("nGram", NGramTokenizer::new)); - tokenizers.add(PreConfiguredTokenizer.singleton("edgeNGram", - () -> new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE))); + tokenizers.add(PreConfiguredTokenizer.elasticsearchVersion("edgeNGram", (version) -> { + if (version.onOrAfter(Version.V_7_3_0)) { + return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); + } + return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE); + })); tokenizers.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new)); return tokenizers; diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java new file mode 100644 index 00000000000..0172f7cbc26 --- /dev/null +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java @@ -0,0 +1,98 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.analysis.common; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.IndexAnalyzers; +import org.elasticsearch.index.analysis.NamedAnalyzer; +import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.test.ESTokenStreamTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; +import java.util.Collections; + +public class EdgeNGramTokenizerTests extends ESTokenStreamTestCase { + + private IndexAnalyzers buildAnalyzers(Version version, String tokenizer) throws IOException { + Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, version) + .put("index.analysis.analyzer.my_analyzer.tokenizer", tokenizer) + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); + return new AnalysisModule(TestEnvironment.newEnvironment(settings), + Collections.singletonList(new CommonAnalysisPlugin())).getAnalysisRegistry().build(idxSettings); + } + + public void testPreConfiguredTokenizer() throws IOException { + + // Before 7.3 we return ngrams of length 1 only + { + Version version = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, + VersionUtils.getPreviousVersion(Version.V_7_3_0)); + try (IndexAnalyzers indexAnalyzers = buildAnalyzers(version, "edge_ngram")) { + NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); + assertNotNull(analyzer); + assertAnalyzesTo(analyzer, "test", new String[]{"t"}); + } + } + + // Check deprecated name as well + { + Version version = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, + VersionUtils.getPreviousVersion(Version.V_7_3_0)); + try (IndexAnalyzers indexAnalyzers = buildAnalyzers(version, "edgeNGram")) { + NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); + assertNotNull(analyzer); + assertAnalyzesTo(analyzer, "test", new String[]{"t"}); + } + } + + // Afterwards, we return ngrams of length 1 and 2, to match the default factory settings + { + try (IndexAnalyzers indexAnalyzers = buildAnalyzers(Version.CURRENT, "edge_ngram")) { + NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); + assertNotNull(analyzer); + assertAnalyzesTo(analyzer, "test", new String[]{"t", "te"}); + } + } + + // Check deprecated name as well + { + try (IndexAnalyzers indexAnalyzers = buildAnalyzers(Version.CURRENT, "edgeNGram")) { + NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); + assertNotNull(analyzer); + assertAnalyzesTo(analyzer, "test", new String[]{"t", "te"}); + + } + } + + } + +} diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java index d799674f231..c8e3699ea84 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java @@ 
-20,14 +20,24 @@ package org.elasticsearch.analysis.common;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.TestEnvironment;
+import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.analysis.AnalysisTestsHelper;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.analysis.TokenFilterFactory;
+import org.elasticsearch.indices.analysis.AnalysisModule;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.IndexSettingsModule;
+import org.elasticsearch.test.VersionUtils;
 import java.io.IOException;
 import java.io.StringReader;
+import java.util.Collections;
 public class WordDelimiterGraphTokenFilterFactoryTests
     extends BaseWordDelimiterTokenFilterFactoryTestCase
@@ -107,4 +117,51 @@ public class WordDelimiterGraphTokenFilterFactoryTests
         assertTokenStreamContents(tokenFilter.create(tokenizer), expected, expectedStartOffsets, expectedEndOffsets, null,
             expectedIncr, expectedPosLen, null);
     }
+
+    public void testPreconfiguredFilter() throws IOException {
+        // Before 7.3 we don't adjust offsets
+        {
+            Settings settings = Settings.builder()
+                .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+                .build();
+            Settings indexSettings = Settings.builder()
+                .put(IndexMetaData.SETTING_VERSION_CREATED,
+                    VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, VersionUtils.getPreviousVersion(Version.V_7_3_0)))
+                .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard")
+                .putList("index.analysis.analyzer.my_analyzer.filter", "word_delimiter_graph")
+                .build();
+            IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
+
+            try (IndexAnalyzers indexAnalyzers = new AnalysisModule(TestEnvironment.newEnvironment(settings),
+                Collections.singletonList(new CommonAnalysisPlugin())).getAnalysisRegistry().build(idxSettings)) {
+
+                NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer");
+                assertNotNull(analyzer);
+                assertAnalyzesTo(analyzer, "h100", new String[]{"h", "100"}, new int[]{ 0, 0 }, new int[]{ 4, 4 });
+
+            }
+        }
+
+        // After 7.3 we do adjust offsets
+        {
+            Settings settings = Settings.builder()
+                .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+                .build();
+            Settings indexSettings = Settings.builder()
+                .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+                .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard")
+                .putList("index.analysis.analyzer.my_analyzer.filter", "word_delimiter_graph")
+                .build();
+            IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
+
+            try (IndexAnalyzers indexAnalyzers = new AnalysisModule(TestEnvironment.newEnvironment(settings),
+                Collections.singletonList(new CommonAnalysisPlugin())).getAnalysisRegistry().build(idxSettings)) {
+
+                NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer");
+                assertNotNull(analyzer);
+                assertAnalyzesTo(analyzer, "h100", new String[]{"h", "100"}, new int[]{ 0, 1 }, new int[]{ 1, 4 });
+
+            }
+        }
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java
index 3693b9eb2ed..236731f6899 100644
--- a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -111,6 +111,7 @@ public final class AnalysisRegistry implements Closeable { private T getComponentFactory(IndexSettings settings, NameOrDefinition nod, String componentType, Function> globalComponentProvider, + Function> prebuiltComponentProvider, BiFunction> indexComponentProvider) throws IOException { if (nod.definition != null) { // custom component, so we build it from scratch @@ -128,10 +129,14 @@ public final class AnalysisRegistry implements Closeable { return factory.get(settings, environment, "__anonymous__" + type, nod.definition); } if (settings == null) { - // no index provided, so we use global analysis components only - AnalysisProvider factory = globalComponentProvider.apply(nod.name); + // no index provided, so we use prebuilt analysis components + AnalysisProvider factory = prebuiltComponentProvider.apply(nod.name); if (factory == null) { - throw new IllegalArgumentException("failed to find global " + componentType + " under [" + nod.name + "]"); + // if there's no prebuilt component, try loading a global one to build with no settings + factory = globalComponentProvider.apply(nod.name); + if (factory == null) { + throw new IllegalArgumentException("failed to find global " + componentType + " under [" + nod.name + "]"); + } } return factory.get(environment, nod.name); } else { @@ -219,25 +224,26 @@ public final class AnalysisRegistry implements Closeable { public NamedAnalyzer buildCustomAnalyzer(IndexSettings indexSettings, boolean normalizer, NameOrDefinition tokenizer, List charFilters, List tokenFilters) throws IOException { TokenizerFactory tokenizerFactory - = getComponentFactory(indexSettings, tokenizer, "tokenizer", this::getTokenizerProvider, this::getTokenizerProvider); + = getComponentFactory(indexSettings, tokenizer, "tokenizer", + this::getTokenizerProvider, prebuiltAnalysis::getTokenizerFactory, this::getTokenizerProvider); List charFilterFactories = new ArrayList<>(); for (NameOrDefinition nod : charFilters) { charFilterFactories.add(getComponentFactory(indexSettings, nod, "char_filter", - this::getCharFilterProvider, this::getCharFilterProvider)); + this::getCharFilterProvider, prebuiltAnalysis::getCharFilterFactory, this::getCharFilterProvider)); } List tokenFilterFactories = new ArrayList<>(); for (NameOrDefinition nod : tokenFilters) { TokenFilterFactory tff = getComponentFactory(indexSettings, nod, "filter", - this::getTokenFilterProvider, this::getTokenFilterProvider); + this::getTokenFilterProvider, prebuiltAnalysis::getTokenFilterFactory, this::getTokenFilterProvider); if (normalizer && tff instanceof NormalizingTokenFilterFactory == false) { throw new IllegalArgumentException("Custom normalizer may not use filter [" + tff.name() + "]"); } tff = tff.getChainAwareTokenFilterFactory(tokenizerFactory, charFilterFactories, tokenFilterFactories, name -> { try { return getComponentFactory(indexSettings, new NameOrDefinition(name), "filter", - this::getTokenFilterProvider, this::getTokenFilterProvider); + this::getTokenFilterProvider, prebuiltAnalysis::getTokenFilterFactory, this::getTokenFilterProvider); } catch (IOException e) { throw new UncheckedIOException(e); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java index 087dfefbbda..96814bd9a6b 
100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java @@ -142,7 +142,7 @@ public class TransportAnalyzeActionTests extends ESTestCase { @Override public List getPreConfiguredCharFilters() { - return singletonList(PreConfiguredCharFilter.singleton("append_foo", false, reader -> new AppendCharFilter(reader, "foo"))); + return singletonList(PreConfiguredCharFilter.singleton("append", false, reader -> new AppendCharFilter(reader, "foo"))); } }; registry = new AnalysisModule(environment, singletonList(plugin)).getAnalysisRegistry(); @@ -171,24 +171,11 @@ public class TransportAnalyzeActionTests extends ESTestCase { List tokens = analyze.getTokens(); assertEquals(4, tokens.size()); - // Refer to a token filter by its type so we get its default configuration - request = new AnalyzeAction.Request(); - request.text("the qu1ck brown fox"); - request.tokenizer("standard"); - request.addTokenFilter("mock"); - analyze - = TransportAnalyzeAction.analyze(request, registry, null, maxTokenCount); - tokens = analyze.getTokens(); - assertEquals(3, tokens.size()); - assertEquals("qu1ck", tokens.get(0).getTerm()); - assertEquals("brown", tokens.get(1).getTerm()); - assertEquals("fox", tokens.get(2).getTerm()); - // We can refer to a pre-configured token filter by its name to get it request = new AnalyzeAction.Request(); request.text("the qu1ck brown fox"); request.tokenizer("standard"); - request.addCharFilter("append_foo"); + request.addCharFilter("append"); // <-- no config, so use preconfigured filter analyze = TransportAnalyzeAction.analyze(request, registry, null, maxTokenCount); tokens = analyze.getTokens(); @@ -198,31 +185,32 @@ public class TransportAnalyzeActionTests extends ESTestCase { assertEquals("brown", tokens.get(2).getTerm()); assertEquals("foxfoo", tokens.get(3).getTerm()); - // We can refer to a token filter by its type to get its default configuration + // If the preconfigured filter doesn't exist, we use a global filter with no settings request = new AnalyzeAction.Request(); request.text("the qu1ck brown fox"); request.tokenizer("standard"); - request.addCharFilter("append"); - request.text("the qu1ck brown fox"); + request.addTokenFilter("mock"); // <-- not preconfigured, but a global one available analyze = TransportAnalyzeAction.analyze(request, registry, null, maxTokenCount); tokens = analyze.getTokens(); - assertEquals(4, tokens.size()); - assertEquals("the", tokens.get(0).getTerm()); - assertEquals("qu1ck", tokens.get(1).getTerm()); - assertEquals("brown", tokens.get(2).getTerm()); - assertEquals("foxbar", tokens.get(3).getTerm()); + assertEquals(3, tokens.size()); + assertEquals("qu1ck", tokens.get(0).getTerm()); + assertEquals("brown", tokens.get(1).getTerm()); + assertEquals("fox", tokens.get(2).getTerm()); - // We can pass a new configuration - request = new AnalyzeAction.Request(); - request.text("the qu1ck brown fox"); - request.tokenizer("standard"); Map tokenFilterConfig = new HashMap<>(); tokenFilterConfig.put("type", "mock"); tokenFilterConfig.put("stopword", "brown"); - request.addTokenFilter(tokenFilterConfig); - request.addCharFilter("append"); + + Map charFilterConfig = new HashMap<>(); + charFilterConfig.put("type", "append"); + + // We can build a new char filter to get default values + request = new AnalyzeAction.Request(); request.text("the qu1ck brown fox"); + request.tokenizer("standard"); + 
request.addTokenFilter(tokenFilterConfig); + request.addCharFilter(charFilterConfig); // <-- basic config, uses defaults analyze = TransportAnalyzeAction.analyze(request, registry, null, maxTokenCount); tokens = analyze.getTokens(); @@ -230,6 +218,21 @@ public class TransportAnalyzeActionTests extends ESTestCase { assertEquals("the", tokens.get(0).getTerm()); assertEquals("qu1ck", tokens.get(1).getTerm()); assertEquals("foxbar", tokens.get(2).getTerm()); + + // We can pass a new configuration + request = new AnalyzeAction.Request(); + request.text("the qu1ck brown fox"); + request.tokenizer("standard"); + request.addTokenFilter(tokenFilterConfig); + charFilterConfig.put("suffix", "baz"); + request.addCharFilter(charFilterConfig); + analyze + = TransportAnalyzeAction.analyze(request, registry, null, maxTokenCount); + tokens = analyze.getTokens(); + assertEquals(3, tokens.size()); + assertEquals("the", tokens.get(0).getTerm()); + assertEquals("qu1ck", tokens.get(1).getTerm()); + assertEquals("foxbaz", tokens.get(2).getTerm()); } public void testFillsAttributes() throws IOException { From 4882b932d8acb79713d87e612198cc7067c2cfc4 Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Thu, 27 Jun 2019 12:44:29 +0100 Subject: [PATCH 04/42] Issue deprecation warnings when preconfigured delimited_payload_filter is used (#43684) #26625 deprecated delimited_payload_filter and added tests to check that warnings would be emitted when both a normal and pre-configured filter were used. Unfortunately, due to a bug in the Analyze API, the pre- configured filter check was never actually triggered, and it turns out that the deprecation warning was not in fact being emitted in this case. #43568 fixed the Analyze API bug, which then surfaced this on backport. This commit ensures that the preconfigured filter also emits the warnings and triggers an error if a new index tries to use a preconfigured delimited_payload_filter --- .../analysis/common/CommonAnalysisPlugin.java | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index f5eb6540a75..ea0b69c678b 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -405,10 +405,18 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri input -> new CommonGramsFilter(input, CharArraySet.EMPTY_SET))); filters.add(PreConfiguredTokenFilter.singleton("czech_stem", false, CzechStemFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("decimal_digit", true, DecimalDigitFilter::new)); - filters.add(PreConfiguredTokenFilter.singleton("delimited_payload_filter", false, input -> - new DelimitedPayloadTokenFilter(input, - DelimitedPayloadTokenFilterFactory.DEFAULT_DELIMITER, - DelimitedPayloadTokenFilterFactory.DEFAULT_ENCODER))); + filters.add(PreConfiguredTokenFilter.singletonWithVersion("delimited_payload_filter", false, (input, version) -> { + if (version.onOrAfter(Version.V_7_0_0)) { + throw new IllegalArgumentException( + "[delimited_payload_filter] is not supported for new indices, use [delimited_payload] instead"); + } + if (version.onOrAfter(Version.V_6_2_0)) { + deprecationLogger.deprecated("Deprecated [delimited_payload_filter] used, replaced by 
[delimited_payload]"); + } + return new DelimitedPayloadTokenFilter(input, + DelimitedPayloadTokenFilterFactory.DEFAULT_DELIMITER, + DelimitedPayloadTokenFilterFactory.DEFAULT_ENCODER); + })); filters.add(PreConfiguredTokenFilter.singleton("delimited_payload", false, input -> new DelimitedPayloadTokenFilter(input, DelimitedPayloadTokenFilterFactory.DEFAULT_DELIMITER, From 36360358b2187c9d20a9495ef59ab378b6c1d765 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 27 Jun 2019 14:33:28 +0200 Subject: [PATCH 05/42] Move query builder caching check to dedicated tests (#43238) Currently `AbstractQueryTestCase#testToQuery` checks the search context cachable flag. This is a bit fragile due to the high randomization of query builders performed by this general test. Also we might only rarely check the "interesting" cases because they rarely get generated when fully randomizing the query builder. This change moved the general checks out ot #testToQuery and instead adds dedicated cache tests for those query builders that exhibit something other than the default behaviour. Closes #43200 --- .../PercolateQueryBuilderTests.java | 12 +++++-- .../ScriptScoreQueryBuilder.java | 8 ++--- .../query/MoreLikeThisQueryBuilderTests.java | 29 +++++++++++++-- .../index/query/RangeQueryBuilderTests.java | 21 +++++++++++ .../index/query/ScriptQueryBuilderTests.java | 11 ++++-- .../query/ScriptScoreQueryBuilderTests.java | 11 ++++-- .../index/query/TermsQueryBuilderTests.java | 7 ---- .../query/TermsSetQueryBuilderTests.java | 36 +++++++++++++++++-- .../index/query/TypeQueryBuilderTests.java | 6 ++++ .../FunctionScoreQueryBuilderTests.java | 36 +++++++++++++++++-- .../test/AbstractQueryTestCase.java | 24 +++++++------ 11 files changed, 167 insertions(+), 34 deletions(-) diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java index 5b4dc610900..a86f93ce405 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java @@ -296,9 +296,17 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase functionBuilder = new ScriptScoreFunctionBuilder( new Script(ScriptType.INLINE, MockScriptEngine.NAME, "1", Collections.emptyMap())); FunctionScoreQueryBuilder builder = functionScoreQuery(queryBuilder, functionBuilder); @@ -796,8 +798,38 @@ public class FunctionScoreQueryBuilderTests extends AbstractQueryTestCase scriptScoreFunction = new ScriptScoreFunctionBuilder( + new Script(ScriptType.INLINE, MockScriptEngine.NAME, "1", Collections.emptyMap())); + RandomScoreFunctionBuilder randomScoreFunctionBuilder = new RandomScoreFunctionBuilderWithFixedSeed(); + + for (ScoreFunctionBuilder scoreFunction : Arrays.asList(scriptScoreFunction, randomScoreFunctionBuilder)) { + FilterFunctionBuilder[] functions = new FilterFunctionBuilder[] { + new FilterFunctionBuilder(RandomQueryBuilder.createQuery(random()), scoreFunction) }; + queryBuilder = new FunctionScoreQueryBuilder(functions); + + context = createShardContext(); + rewriteQuery = rewriteQuery(queryBuilder, new QueryShardContext(context)); + assertNotNull(rewriteQuery.toQuery(context)); + assertFalse("query should not be cacheable: " + queryBuilder.toString(), context.isCacheable()); + } + } + + private boolean isCacheable(FunctionScoreQueryBuilder 
queryBuilder) {
     FilterFunctionBuilder[] filterFunctionBuilders = queryBuilder.filterFunctionBuilders();
     for (FilterFunctionBuilder builder : filterFunctionBuilders) {
         if (builder.getScoreFunction() instanceof ScriptScoreFunctionBuilder) {
diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java
index 3b03f95f28f..089423770d5 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java
@@ -428,13 +428,6 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
      * we first rewrite the query with a private context, then reset the context and then build the actual lucene query*/
     QueryBuilder rewritten = rewriteQuery(firstQuery, new QueryShardContext(context));
     Query firstLuceneQuery = rewritten.toQuery(context);
-    if (isCacheable(firstQuery)) {
-        assertTrue("query was marked as not cacheable in the context but this test indicates it should be cacheable: "
-            + firstQuery.toString(), context.isCacheable());
-    } else {
-        assertFalse("query was marked as cacheable in the context but this test indicates it should not be cacheable: "
-            + firstQuery.toString(), context.isCacheable());
-    }
     assertNotNull("toQuery should not return null", firstLuceneQuery);
     assertLuceneQuery(firstQuery, firstLuceneQuery, searchContext);
     //remove after assertLuceneQuery since the assertLuceneQuery impl might access the context as well
@@ -478,10 +471,6 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
         return rewritten;
     }
-    protected boolean isCacheable(QB queryBuilder) {
-        return true;
-    }
-
     /**
      * Few queries allow you to set the boost on the Java API, although the corresponding parser
      * doesn't parse it as it isn't supported. This method allows to disable boost related tests for those queries.
@@ -809,4 +798,17 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
     public boolean isTextField(String fieldName) {
         return fieldName.equals(STRING_FIELD_NAME) || fieldName.equals(STRING_ALIAS_FIELD_NAME);
     }
+
+    /**
+     * Check that a query is generally cacheable. Tests for query builders that are not always cacheable
+     * should override this method and make sure the different cases are always tested
+     */
+    public void testCacheability() throws IOException {
+        QB queryBuilder = createTestQueryBuilder();
+        QueryShardContext context = createShardContext();
+        QueryBuilder rewriteQuery = rewriteQuery(queryBuilder, new QueryShardContext(context));
+        assertNotNull(rewriteQuery.toQuery(context));
+        assertTrue("query should be cacheable: " + queryBuilder.toString(), context.isCacheable());
+    }
+
 }

From c5beb05f77b9951bc81b6ec1aac44c28e54b2a0d Mon Sep 17 00:00:00 2001
From: David Roberts
Date: Thu, 27 Jun 2019 14:32:47 +0100
Subject: [PATCH 06/42] [ML][DataFrame] Consider data frame templates internal
 in REST tests (#43692)

The data frame index template pattern was not in the list of template
patterns considered internal, and therefore exempt from cleanup after
every test.
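For context, the cleanup hook leaves alone any template whose name matches one of a fixed set of prefixes; a minimal sketch of that check (the method name below is an assumption, the prefixes are the ones visible in the diff):

    // Sketch: names matching these prefixes are considered internal and are
    // skipped by the per-test cleanup (method name hypothetical).
    private static boolean isInternalTemplate(String name) {
        return name.startsWith(".watch")
            || name.startsWith(".triggered_watches")
            || name.startsWith(".data-frame-")   // added by this change
            || name.startsWith(".ml-");
    }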
--- .../main/java/org/elasticsearch/test/rest/ESRestTestCase.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index a82f6c82f44..f4590859867 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -938,6 +938,9 @@ public abstract class ESRestTestCase extends ESTestCase { if (name.startsWith(".watch") || name.startsWith(".triggered_watches")) { return true; } + if (name.startsWith(".data-frame-")) { + return true; + } if (name.startsWith(".ml-")) { return true; } From cd4f81e15e7fcec44a99eeeb025b39fb2f8697ad Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 27 Jun 2019 16:51:11 +0200 Subject: [PATCH 07/42] Remove Unused AWS KMS Dependency (#43671) (#43679) * We don't make use of KMS at the moment, no need to have this dependency here --- plugins/repository-s3/build.gradle | 1 - .../repository-s3/licenses/aws-java-sdk-kms-1.11.562.jar.sha1 | 1 - 2 files changed, 2 deletions(-) delete mode 100644 plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.562.jar.sha1 diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index dbdf654a13c..e32eefa5055 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -33,7 +33,6 @@ versions << [ dependencies { compile "com.amazonaws:aws-java-sdk-s3:${versions.aws}" - compile "com.amazonaws:aws-java-sdk-kms:${versions.aws}" compile "com.amazonaws:aws-java-sdk-core:${versions.aws}" compile "com.amazonaws:jmespath-java:${versions.aws}" compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" diff --git a/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.562.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.562.jar.sha1 deleted file mode 100644 index 65c85dc87b1..00000000000 --- a/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.562.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fdf4daf1960fe760e7a950dd28a05c5abc12788 \ No newline at end of file From 329d05f61e327160faaf30e9313c25db4ab967fe Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 27 Jun 2019 16:56:15 +0200 Subject: [PATCH 08/42] Fix UOE on search requests that match a sparse role query (#43668) Search requests executed through the SecurityIndexSearcherWrapper throw an UnsupportedOperationException if they match a sparse role query. When low level cancellation is activated (which is the default since #42857), the context index searcher creates a weight that doesn't handle #scorer. This change fixes this bug and adds a test to ensure that we check this case. 
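The shape of the fix: the cancellation wrapper must keep delegating `scorer` instead of throwing, because wrapped searchers may call it directly. A simplified, self-contained sketch of that pattern follows (the class name and structure are illustrative assumptions; the real change lives in ContextIndexSearcher, shown in the diff below):

    import java.io.IOException;
    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.search.BulkScorer;
    import org.apache.lucene.search.FilterWeight;
    import org.apache.lucene.search.Scorer;
    import org.apache.lucene.search.Weight;

    // Sketch: a weight that checks for cancellation before scoring each segment.
    class CancellableWeight extends FilterWeight {
        private final Runnable checkCancelled;

        CancellableWeight(Weight in, Runnable checkCancelled) {
            super(in);
            this.checkCancelled = checkCancelled;
        }

        @Override
        public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
            checkCancelled.run();          // low-level cancellation hook
            return in.bulkScorer(context);
        }

        @Override
        public Scorer scorer(LeafReaderContext context) throws IOException {
            // Delegate rather than throw: callers such as the security
            // document-subset path use the scorer directly.
            return in.scorer(context);
        }
    }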
--- .../search/internal/ContextIndexSearcher.java | 7 ++++--- .../SecurityIndexSearcherWrapperUnitTests.java | 8 +++++++- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java index 7c56796f3d2..49c310ba706 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java @@ -152,13 +152,14 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { } @Override - public Scorer scorer(LeafReaderContext context) throws IOException { + public boolean isCacheable(LeafReaderContext ctx) { throw new UnsupportedOperationException(); } @Override - public boolean isCacheable(LeafReaderContext ctx) { - throw new UnsupportedOperationException(); + public Scorer scorer(LeafReaderContext context) throws IOException { + // in case the wrapped searcher (in) uses the scorer directly + return weight.scorer(context); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java index b9eb0241d9a..3da3949bad9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java @@ -43,6 +43,7 @@ import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.SeqNoFieldMapper; @@ -52,6 +53,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetReader.DocumentSubsetDirectoryReader; @@ -537,7 +539,11 @@ public class SecurityIndexSearcherWrapperUnitTests extends ESTestCase { } DocumentSubsetDirectoryReader filteredReader = DocumentSubsetReader.wrap(reader, cache, roleQuery); - IndexSearcher searcher = new SecurityIndexSearcherWrapper.IndexSearcherWrapper(filteredReader); + IndexSearcher wrapSearcher = new SecurityIndexSearcherWrapper.IndexSearcherWrapper(filteredReader); + Engine.Searcher engineSearcher = new Engine.Searcher("test", wrapSearcher, () -> {}); + ContextIndexSearcher searcher = new ContextIndexSearcher(engineSearcher, + wrapSearcher.getQueryCache(), wrapSearcher.getQueryCachingPolicy()); + searcher.setCheckCancelled(() -> {}); // Searching a non-existing term will trigger a null scorer assertEquals(0, searcher.count(new TermQuery(new Term("non_existing_field", "non_existing_value")))); From df4b30fd8bfae967c82500f7299f54a63453df11 Mon 
Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 27 Jun 2019 17:00:21 +0200 Subject: [PATCH 09/42] Fix propagation of enablePositionIncrements in QueryStringQueryBuilder (#43578) This change fixes the propagation of the enablePositionIncrements option to the underlying QueryBuilder. Closes #43574 --- .../index/search/MatchQuery.java | 7 ++-- .../index/search/MultiMatchQuery.java | 11 ++++--- .../index/search/QueryStringQueryParser.java | 6 ++++ .../query/QueryStringQueryBuilderTests.java | 33 ++++++++----------- 4 files changed, 30 insertions(+), 27 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java index ec36b1dceaf..a200f6349e1 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java @@ -251,7 +251,7 @@ public class MatchQuery { Analyzer analyzer = getAnalyzer(fieldType, type == Type.PHRASE || type == Type.PHRASE_PREFIX); assert analyzer != null; - MatchQueryBuilder builder = new MatchQueryBuilder(analyzer, fieldType); + MatchQueryBuilder builder = new MatchQueryBuilder(analyzer, fieldType, enablePositionIncrements, autoGenerateSynonymsPhraseQuery); /* * If a keyword analyzer is used, we know that further analysis isn't @@ -357,15 +357,16 @@ public class MatchQuery { /** * Creates a new QueryBuilder using the given analyzer. */ - MatchQueryBuilder(Analyzer analyzer, MappedFieldType fieldType) { + MatchQueryBuilder(Analyzer analyzer, MappedFieldType fieldType, + boolean enablePositionIncrements, boolean autoGenerateSynonymsPhraseQuery) { super(analyzer); this.fieldType = fieldType; + setEnablePositionIncrements(enablePositionIncrements); if (hasPositions(fieldType)) { setAutoGenerateMultiTermSynonymsPhraseQuery(autoGenerateSynonymsPhraseQuery); } else { setAutoGenerateMultiTermSynonymsPhraseQuery(false); } - setEnablePositionIncrements(enablePositionIncrements); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java index b9943870df7..7d88e508d0d 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java @@ -138,9 +138,11 @@ public class MultiMatchQuery extends MatchQuery { for (Map.Entry> group : groups.entrySet()) { final MatchQueryBuilder builder; if (group.getValue().size() == 1) { - builder = new MatchQueryBuilder(group.getKey(), group.getValue().get(0).fieldType); + builder = new MatchQueryBuilder(group.getKey(), group.getValue().get(0).fieldType, + enablePositionIncrements, autoGenerateSynonymsPhraseQuery); } else { - builder = new BlendedQueryBuilder(group.getKey(), group.getValue(), tieBreaker); + builder = new BlendedQueryBuilder(group.getKey(), group.getValue(), tieBreaker, + enablePositionIncrements, autoGenerateSynonymsPhraseQuery); } /* @@ -170,8 +172,9 @@ public class MultiMatchQuery extends MatchQuery { private final List blendedFields; private final float tieBreaker; - BlendedQueryBuilder(Analyzer analyzer, List blendedFields, float tieBreaker) { - super(analyzer, blendedFields.get(0).fieldType); + BlendedQueryBuilder(Analyzer analyzer, List blendedFields, float tieBreaker, + boolean enablePositionIncrements, boolean autoGenerateSynonymsPhraseQuery) { + super(analyzer, blendedFields.get(0).fieldType, enablePositionIncrements, 
autoGenerateSynonymsPhraseQuery); this.blendedFields = blendedFields; this.tieBreaker = tieBreaker; } diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java index ba2163861cd..f2b3e1dfc42 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java @@ -153,6 +153,12 @@ public class QueryStringQueryParser extends XQueryParser { this.lenient = lenient; } + @Override + public void setEnablePositionIncrements(boolean enable) { + super.setEnablePositionIncrements(enable); + queryBuilder.setEnablePositionIncrements(enable); + } + @Override public void setDefaultOperator(Operator op) { super.setDefaultOperator(op); diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 30d877ade13..ea36a90e59b 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -766,26 +766,6 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase Date: Thu, 27 Jun 2019 16:37:00 +0100 Subject: [PATCH 10/42] [ML] Don't write timing stats on no-op (#43680) Similar to elastic/ml-cpp#512, if a job opens and closes and does nothing in between we shouldn't write timing stats to the results index. --- .../job/persistence/TimingStatsReporter.java | 8 ++++++++ .../output/AutodetectResultProcessor.java | 2 +- .../persistence/TimingStatsReporterTests.java | 19 +++++++++++++++++++ .../rest-api-spec/test/ml/index_layout.yml | 16 ++++++++-------- 4 files changed, 36 insertions(+), 9 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/TimingStatsReporter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/TimingStatsReporter.java index 51903a1676b..d30335a5f06 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/TimingStatsReporter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/TimingStatsReporter.java @@ -42,6 +42,14 @@ public class TimingStatsReporter { } } + public void finishReporting() { + // Don't flush if current timing stats are identical to the persisted ones + if (currentTimingStats.equals(persistedTimingStats)) { + return; + } + flush(); + } + public void flush() { persistedTimingStats = new TimingStats(currentTimingStats); bulkResultsPersister.persistTimingStats(persistedTimingStats); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessor.java index 6016af406cf..d2d052b1a3e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessor.java @@ -134,7 +134,7 @@ public class AutodetectResultProcessor { try { if (processKilled == false) { - timingStatsReporter.flush(); + timingStatsReporter.finishReporting(); bulkResultsPersister.executeRequest(); } } catch (Exception e) { diff --git 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/TimingStatsReporterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/TimingStatsReporterTests.java index fb9f31f1d96..f2314e6de3e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/TimingStatsReporterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/TimingStatsReporterTests.java @@ -14,6 +14,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyZeroInteractions; public class TimingStatsReporterTests extends ESTestCase { @@ -76,6 +77,24 @@ public class TimingStatsReporterTests extends ESTestCase { inOrder.verifyNoMoreInteractions(); } + public void testFinishReportingNoChange() { + TimingStatsReporter reporter = new TimingStatsReporter(new TimingStats(JOB_ID), bulkResultsPersister); + + reporter.finishReporting(); + + verifyZeroInteractions(bulkResultsPersister); + } + + public void testFinishReportingWithChange() { + TimingStatsReporter reporter = new TimingStatsReporter(new TimingStats(JOB_ID), bulkResultsPersister); + + reporter.reportBucketProcessingTime(10); + + reporter.finishReporting(); + + verify(bulkResultsPersister).persistTimingStats(new TimingStats(JOB_ID, 1, 10.0, 10.0, 10.0, 10.0)); + } + public void testTimingStatsDifferSignificantly() { assertThat( TimingStatsReporter.differSignificantly( diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml index a8e4bf90d4d..eb3a73424a6 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml @@ -124,7 +124,7 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser count: index: .ml-anomalies-shared - - match: {count: 8} + - match: {count: 6} - do: headers: @@ -138,7 +138,7 @@ setup: term: job_id: index-layout-job - - match: {count: 4} + - match: {count: 3} - do: headers: @@ -152,7 +152,7 @@ setup: term: job_id: index-layout-job - - match: {count: 4} + - match: {count: 3} - do: headers: @@ -166,7 +166,7 @@ setup: term: job_id: index-layout-job2 - - match: {count: 4} + - match: {count: 3} - do: headers: @@ -179,7 +179,7 @@ setup: filter: term: job_id: index-layout-job2 - - match: {count: 4} + - match: {count: 3} - do: headers: @@ -236,7 +236,7 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser count: index: .ml-anomalies-shared - - match: {count: 4} + - match: {count: 3} - do: @@ -251,7 +251,7 @@ setup: term: job_id: index-layout-job2 - - match: {count: 4} + - match: {count: 3} - do: headers: @@ -265,7 +265,7 @@ setup: term: job_id: index-layout-job2 - - match: {count: 4} + - match: {count: 3} - do: ml.delete_job: From 6744344ef2beaf30d1ca8326463cb898569a21d1 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Thu, 27 Jun 2019 18:08:48 +0200 Subject: [PATCH 11/42] Handle situation where only voting-only nodes are bootstrapped (#43628) Adds support for the situation where only voting-only nodes are bootstrapped. 
In that case, they will still try to become elected and bring full master nodes into the cluster.
---
 .../coordination/CoordinationState.java       |  3 +-
 .../AbstractCoordinatorTestCase.java          | 15 ++++++++--
 .../coordination/VotingOnlyNodePlugin.java    | 30 +++++++++++++++----
 .../VotingOnlyNodeCoordinatorTests.java       | 30 ++++++++++++++++++-
 .../VotingOnlyNodePluginTests.java            | 12 ++++++++
 5 files changed, 81 insertions(+), 9 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java
index 4b3f5cede48..4208c8e9c45 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java
@@ -257,7 +257,8 @@ public class CoordinationState {
         boolean added = joinVotes.addJoinVote(join);
         boolean prevElectionWon = electionWon;
         electionWon = isElectionQuorum(joinVotes);
-        assert !prevElectionWon || electionWon; // we cannot go from won to not won
+        assert !prevElectionWon || electionWon : // we cannot go from won to not won
+            "localNode=" + localNode + ", join=" + join + ", joinVotes=" + joinVotes;
         logger.debug("handleJoin: added join {} from [{}] for election, electionWon={} lastAcceptedTerm={} lastAcceptedVersion={}",
             join, join.getSourceNode(), electionWon, lastAcceptedTerm, getLastAcceptedVersion());
diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java
index 0547412f615..8bdedaceba7 100644
--- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java
@@ -67,6 +67,8 @@ import org.elasticsearch.indices.cluster.FakeThreadPoolMasterService;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.disruption.DisruptableMockTransport;
 import org.elasticsearch.test.disruption.DisruptableMockTransport.ConnectionStatus;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportInterceptor;
 import org.elasticsearch.transport.TransportService;
 import org.hamcrest.Matcher;
 import org.hamcrest.core.IsCollectionContaining;
@@ -822,7 +824,8 @@ public class AbstractCoordinatorTestCase extends ESTestCase {
                 .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(),
                     ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.get(Settings.EMPTY)).build(); // suppress auto-bootstrap
             transportService = mockTransport.createTransportService(
-                settings, deterministicTaskQueue.getThreadPool(this::onNode), NOOP_TRANSPORT_INTERCEPTOR,
+                settings, deterministicTaskQueue.getThreadPool(this::onNode),
+                getTransportInterceptor(localNode, deterministicTaskQueue.getThreadPool(this::onNode)),
                 a -> localNode, null, emptySet());
             masterService = new AckedFakeThreadPoolMasterService(localNode.getId(), "test",
                 runnable -> deterministicTaskQueue.scheduleNow(onNode(runnable)));
@@ -839,7 +842,7 @@ public class AbstractCoordinatorTestCase extends ESTestCase {
             coordinator = new Coordinator("test_node", settings, clusterSettings, transportService, writableRegistry(),
                 allocationService, masterService, this::getPersistedState,
                 Cluster.this::provideSeedHosts, clusterApplierService, onJoinValidators, Randomness.get(), s -> {},
-
ElectionStrategy.DEFAULT_INSTANCE); + getElectionStrategy()); masterService.setClusterStatePublisher(coordinator); final GatewayService gatewayService = new GatewayService(settings, allocationService, clusterService, deterministicTaskQueue.getThreadPool(this::onNode), null, coordinator); @@ -1099,6 +1102,14 @@ public class AbstractCoordinatorTestCase extends ESTestCase { } } + protected TransportInterceptor getTransportInterceptor(DiscoveryNode localNode, ThreadPool threadPool) { + return NOOP_TRANSPORT_INTERCEPTOR; + } + + protected ElectionStrategy getElectionStrategy() { + return ElectionStrategy.DEFAULT_INSTANCE; + } + public static final String NODE_ID_LOG_CONTEXT_KEY = "nodeId"; protected static String getNodeIdForLogContext(DiscoveryNode node) { diff --git a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/VotingOnlyNodePlugin.java b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/VotingOnlyNodePlugin.java index 8394e1d21ff..7fefc0fcfed 100644 --- a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/VotingOnlyNodePlugin.java +++ b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/VotingOnlyNodePlugin.java @@ -46,6 +46,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Predicate; import java.util.function.Supplier; public class VotingOnlyNodePlugin extends Plugin implements DiscoveryPlugin, NetworkPlugin, ActionPlugin { @@ -151,16 +152,35 @@ public class VotingOnlyNodePlugin extends Plugin implements DiscoveryPlugin, Net if (joinVotes.nodes().stream().filter(DiscoveryNode::isMasterNode).allMatch(VotingOnlyNodePlugin::isVotingOnlyNode)) { return false; } - // if there's a vote from a full master node with same last accepted term and version, that node should become master - // instead, so we should stand down - if (joinVotes.getJoins().stream().anyMatch(join -> isFullMasterNode(join.getSourceNode()) && - join.getLastAcceptedTerm() == localAcceptedTerm && - join.getLastAcceptedVersion() == localAcceptedVersion)) { + // if there's a vote from a full master node with same state (i.e. last accepted term and version match), then that node + // should become master instead, so we should stand down. There are two exceptional cases, however: + // 1) if we are in term 0. In that case, we allow electing the voting-only node to avoid poisonous situations where only + // voting-only nodes are bootstrapped. + // 2) if there is another full master node with an older state. In that case, we ensure that + // satisfiesAdditionalQuorumConstraints cannot go from true to false when adding new joinVotes in the same election. + // As voting-only nodes only broadcast the state to the full master nodes, eventually all of them will have caught up + // and there should not be any remaining full master nodes with older state, effectively disabling election of + // voting-only nodes. 
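+ // For example: on a freshly bootstrapped cluster all nodes are still in term 0,
+ // so the localAcceptedTerm > 0 check below does not hold and a voting-only
+ // candidate can still win the election even though full master nodes with
+ // identical state have joined it; it then brings those full master nodes up to
+ // date so that one of them can win a subsequent election.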
+ if (joinVotes.getJoins().stream().anyMatch(fullMasterWithSameState(localAcceptedTerm, localAcceptedVersion)) && + localAcceptedTerm > 0 && + joinVotes.getJoins().stream().noneMatch(fullMasterWithOlderState(localAcceptedTerm, localAcceptedVersion))) { return false; } } return true; } + + private static Predicate fullMasterWithSameState(long localAcceptedTerm, long localAcceptedVersion) { + return join -> isFullMasterNode(join.getSourceNode()) && + join.getLastAcceptedTerm() == localAcceptedTerm && + join.getLastAcceptedVersion() == localAcceptedVersion; + } + + private static Predicate fullMasterWithOlderState(long localAcceptedTerm, long localAcceptedVersion) { + return join -> isFullMasterNode(join.getSourceNode()) && + (join.getLastAcceptedTerm() < localAcceptedTerm || + (join.getLastAcceptedTerm() == localAcceptedTerm && join.getLastAcceptedVersion() < localAcceptedVersion)); + } } static class VotingOnlyNodeAsyncSender implements TransportInterceptor.AsyncSender { diff --git a/x-pack/plugin/voting-only-node/src/test/java/org/elasticsearch/cluster/coordination/VotingOnlyNodeCoordinatorTests.java b/x-pack/plugin/voting-only-node/src/test/java/org/elasticsearch/cluster/coordination/VotingOnlyNodeCoordinatorTests.java index 38f00c91efe..1059f36a685 100644 --- a/x-pack/plugin/voting-only-node/src/test/java/org/elasticsearch/cluster/coordination/VotingOnlyNodeCoordinatorTests.java +++ b/x-pack/plugin/voting-only-node/src/test/java/org/elasticsearch/cluster/coordination/VotingOnlyNodeCoordinatorTests.java @@ -12,6 +12,9 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportInterceptor; +import org.junit.BeforeClass; import java.util.Collections; @@ -19,6 +22,31 @@ import static java.util.Collections.emptySet; public class VotingOnlyNodeCoordinatorTests extends AbstractCoordinatorTestCase { + @BeforeClass + public static void setPossibleRolesWithVotingOnly() { + DiscoveryNode.setPossibleRoles( + Sets.union(DiscoveryNodeRole.BUILT_IN_ROLES, Sets.newHashSet(VotingOnlyNodePlugin.VOTING_ONLY_NODE_ROLE))); + } + + @Override + protected TransportInterceptor getTransportInterceptor(DiscoveryNode localNode, ThreadPool threadPool) { + if (VotingOnlyNodePlugin.isVotingOnlyNode(localNode)) { + return new TransportInterceptor() { + @Override + public AsyncSender interceptSender(AsyncSender sender) { + return new VotingOnlyNodePlugin.VotingOnlyNodeAsyncSender(sender, () -> threadPool); + } + }; + } else { + return super.getTransportInterceptor(localNode, threadPool); + } + } + + @Override + protected ElectionStrategy getElectionStrategy() { + return new VotingOnlyNodePlugin.VotingOnlyNodeElectionStrategy(); + } + public void testDoesNotElectVotingOnlyMasterNode() { final Cluster cluster = new Cluster(randomIntBetween(1, 5), false, Settings.EMPTY); cluster.runRandomly(); @@ -26,7 +54,7 @@ public class VotingOnlyNodeCoordinatorTests extends AbstractCoordinatorTestCase final Cluster.ClusterNode leader = cluster.getAnyLeader(); assertTrue(leader.getLocalNode().isMasterNode()); - assertFalse(VotingOnlyNodePlugin.isVotingOnlyNode(leader.getLocalNode())); + assertFalse(leader.getLocalNode().toString(), VotingOnlyNodePlugin.isVotingOnlyNode(leader.getLocalNode())); } @Override diff --git 
a/x-pack/plugin/voting-only-node/src/test/java/org/elasticsearch/cluster/coordination/VotingOnlyNodePluginTests.java b/x-pack/plugin/voting-only-node/src/test/java/org/elasticsearch/cluster/coordination/VotingOnlyNodePluginTests.java index 8366770c1a2..055d1d4f9cd 100644 --- a/x-pack/plugin/voting-only-node/src/test/java/org/elasticsearch/cluster/coordination/VotingOnlyNodePluginTests.java +++ b/x-pack/plugin/voting-only-node/src/test/java/org/elasticsearch/cluster/coordination/VotingOnlyNodePluginTests.java @@ -68,6 +68,18 @@ public class VotingOnlyNodePluginTests extends ESIntegTestCase { equalTo(false)); } + public void testBootstrapOnlyVotingOnlyNodes() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + internalCluster().startNodes(Settings.builder().put(VotingOnlyNodePlugin.VOTING_ONLY_NODE_SETTING.getKey(), true).build(), + Settings.EMPTY, Settings.EMPTY); + assertBusy(() -> assertThat( + client().admin().cluster().prepareState().get().getState().getLastCommittedConfiguration().getNodeIds().size(), + equalTo(3))); + assertThat( + VotingOnlyNodePlugin.isVotingOnlyNode(client().admin().cluster().prepareState().get().getState().nodes().getMasterNode()), + equalTo(false)); + } + public void testVotingOnlyNodesCannotBeMasterWithoutFullMasterNodes() throws Exception { internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().startNode(); From d46e2bb26a7a5ee7c2e245a7b142bac9bb8813a8 Mon Sep 17 00:00:00 2001 From: lcawl Date: Thu, 27 Jun 2019 09:42:47 -0700 Subject: [PATCH 12/42] [DOCS] Adds anchors and attributes to ML APIs --- .../apis/delete-transform.asciidoc | 2 +- .../data-frames/apis/get-transform.asciidoc | 2 +- .../apis/preview-transform.asciidoc | 2 +- .../data-frames/apis/put-transform.asciidoc | 2 +- .../data-frames/apis/start-transform.asciidoc | 2 +- .../data-frames/apis/stop-transform.asciidoc | 2 +- docs/reference/ml/apis/close-job.asciidoc | 28 ++++++++-------- .../ml/apis/delete-calendar-event.asciidoc | 20 ++++++------ .../ml/apis/delete-calendar-job.asciidoc | 17 +++++----- .../ml/apis/delete-calendar.asciidoc | 22 ++++++------- .../ml/apis/delete-datafeed.asciidoc | 27 ++++++++-------- .../ml/apis/delete-expired-data.asciidoc | 13 +++++--- docs/reference/ml/apis/delete-filter.asciidoc | 20 ++++++------ .../ml/apis/delete-forecast.asciidoc | 21 ++++++------ docs/reference/ml/apis/delete-job.asciidoc | 21 ++++++------ .../ml/apis/delete-snapshot.asciidoc | 19 +++++------ .../ml/apis/find-file-structure.asciidoc | 19 ++++++----- docs/reference/ml/apis/flush-job.asciidoc | 24 +++++++------- docs/reference/ml/apis/forecast.asciidoc | 23 +++++++------ docs/reference/ml/apis/get-bucket.asciidoc | 32 +++++++++---------- .../ml/apis/get-calendar-event.asciidoc | 27 +++++++++------- docs/reference/ml/apis/get-calendar.asciidoc | 30 ++++++++--------- docs/reference/ml/apis/get-category.asciidoc | 32 ++++++++++--------- .../ml/apis/get-datafeed-stats.asciidoc | 27 ++++++++-------- docs/reference/ml/apis/get-datafeed.asciidoc | 25 ++++++++------- docs/reference/ml/apis/get-filter.asciidoc | 30 ++++++++--------- .../reference/ml/apis/get-influencer.asciidoc | 29 ++++++++--------- docs/reference/ml/apis/get-job-stats.asciidoc | 28 ++++++++-------- docs/reference/ml/apis/get-job.asciidoc | 25 ++++++++------- docs/reference/ml/apis/get-ml-info.asciidoc | 18 ++++++----- .../ml/apis/get-overall-buckets.asciidoc | 28 +++++++++------- docs/reference/ml/apis/get-record.asciidoc | 30 ++++++++--------- docs/reference/ml/apis/get-snapshot.asciidoc | 27 
++++++++-------- docs/reference/ml/apis/open-job.asciidoc | 26 +++++++-------- .../ml/apis/post-calendar-event.asciidoc | 26 ++++++++------- docs/reference/ml/apis/post-data.asciidoc | 31 +++++++++--------- .../ml/apis/preview-datafeed.asciidoc | 24 +++++++------- .../ml/apis/put-calendar-job.asciidoc | 16 ++++++---- docs/reference/ml/apis/put-calendar.asciidoc | 25 ++++++++------- docs/reference/ml/apis/put-datafeed.asciidoc | 28 ++++++++-------- docs/reference/ml/apis/put-filter.asciidoc | 23 +++++++------ docs/reference/ml/apis/put-job.asciidoc | 23 +++++++------ .../ml/apis/revert-snapshot.asciidoc | 24 +++++++------- .../ml/apis/set-upgrade-mode.asciidoc | 17 ++++++---- .../reference/ml/apis/start-datafeed.asciidoc | 25 ++++++++------- docs/reference/ml/apis/stop-datafeed.asciidoc | 25 ++++++++------- .../ml/apis/update-datafeed.asciidoc | 24 ++++++++------ docs/reference/ml/apis/update-filter.asciidoc | 20 ++++++------ docs/reference/ml/apis/update-job.asciidoc | 19 ++++++----- .../ml/apis/update-snapshot.asciidoc | 23 +++++++------ .../ml/apis/validate-detector.asciidoc | 20 ++++++------ docs/reference/ml/apis/validate-job.asciidoc | 20 ++++++------ 52 files changed, 585 insertions(+), 528 deletions(-) diff --git a/docs/reference/data-frames/apis/delete-transform.asciidoc b/docs/reference/data-frames/apis/delete-transform.asciidoc index 7cc911e91ac..23c70d914f0 100644 --- a/docs/reference/data-frames/apis/delete-transform.asciidoc +++ b/docs/reference/data-frames/apis/delete-transform.asciidoc @@ -43,7 +43,7 @@ NOTE: Before you can delete the {dataframe-transform}, you must stop it. [discrete] [[delete-data-frame-transform-examples]] -==== {api-example-title} +==== {api-examples-title} [source,js] -------------------------------------------------- diff --git a/docs/reference/data-frames/apis/get-transform.asciidoc b/docs/reference/data-frames/apis/get-transform.asciidoc index 9dd0ff092d7..847d764c012 100644 --- a/docs/reference/data-frames/apis/get-transform.asciidoc +++ b/docs/reference/data-frames/apis/get-transform.asciidoc @@ -73,7 +73,7 @@ see {stack-ov}/security-privileges.html[Security privileges] and [discrete] [[get-data-frame-transform-example]] -==== {api-example-title} +==== {api-examples-title} The following example retrieves information about a maximum of ten transforms: diff --git a/docs/reference/data-frames/apis/preview-transform.asciidoc b/docs/reference/data-frames/apis/preview-transform.asciidoc index d4f2a9e6a12..4e11fd5eda2 100644 --- a/docs/reference/data-frames/apis/preview-transform.asciidoc +++ b/docs/reference/data-frames/apis/preview-transform.asciidoc @@ -42,7 +42,7 @@ If the {es} {security-features} are enabled, you must have reduce the data. See <>. [discrete] -==== {api-example-title} +==== {api-examples-title} [source,js] -------------------------------------------------- diff --git a/docs/reference/data-frames/apis/put-transform.asciidoc b/docs/reference/data-frames/apis/put-transform.asciidoc index a24cc7d2245..3c6a5251bff 100644 --- a/docs/reference/data-frames/apis/put-transform.asciidoc +++ b/docs/reference/data-frames/apis/put-transform.asciidoc @@ -71,7 +71,7 @@ IMPORTANT: You must use {kib} or this API to create a {dataframe-transform}. 
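For orientation ahead of the full example below, here is a minimal request sketch. It assumes the 7.x body shape with `source`, `dest`, and `pivot` sections; the index, transform, field, and aggregation names are illustrative only:

[source,js]
--------------------------------------------------
PUT _data_frame/transforms/example-transform
{
  "source": { "index": "example-source-index" },
  "dest": { "index": "example-dest-index" },
  "pivot": {
    "group_by": {
      "customer_id": { "terms": { "field": "customer_id" } }
    },
    "aggregations": {
      "mean_price": { "avg": { "field": "price" } }
    }
  }
}
--------------------------------------------------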
[discrete] [[put-data-frame-transform-example]] -==== {api-example-title} +==== {api-examples-title} [source,js] -------------------------------------------------- diff --git a/docs/reference/data-frames/apis/start-transform.asciidoc b/docs/reference/data-frames/apis/start-transform.asciidoc index 059f8a63414..e7ae0353f5c 100644 --- a/docs/reference/data-frames/apis/start-transform.asciidoc +++ b/docs/reference/data-frames/apis/start-transform.asciidoc @@ -40,7 +40,7 @@ have `view_index_metadata` privileges on the source index for the [discrete] [[start-data-frame-transform-example]] -==== {api-example-title} +==== {api-examples-title} [source,js] -------------------------------------------------- diff --git a/docs/reference/data-frames/apis/stop-transform.asciidoc b/docs/reference/data-frames/apis/stop-transform.asciidoc index 4ade2706ab2..9a08aaf0a9b 100644 --- a/docs/reference/data-frames/apis/stop-transform.asciidoc +++ b/docs/reference/data-frames/apis/stop-transform.asciidoc @@ -74,7 +74,7 @@ All {dataframe-transforms} can be stopped by using `_all` or `*` as the [discrete] [[stop-data-frame-transform-example]] -==== {api-example-title} +==== {api-examples-title} [source,js] -------------------------------------------------- diff --git a/docs/reference/ml/apis/close-job.asciidoc b/docs/reference/ml/apis/close-job.asciidoc index c5f9b5fc244..fa96b18777d 100644 --- a/docs/reference/ml/apis/close-job.asciidoc +++ b/docs/reference/ml/apis/close-job.asciidoc @@ -12,8 +12,9 @@ A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. - -==== Request +[discrete] +[[ml-close-job-request]] +==== {api-request-title} `POST _ml/anomaly_detectors//_close` + @@ -21,8 +22,8 @@ operations, but you can still explore and navigate results. `POST _ml/anomaly_detectors/_all/_close` + - -==== Description +[[ml-close-job-desc]] +==== {api-description-title} You can close multiple jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can close all jobs @@ -47,15 +48,16 @@ after the close job API returns. The `force` query parameter should only be use situations where the job has already failed, or where you are not interested in results the job might have recently produced or might produce in the future. - -==== Path Parameters +[discrete] +[[ml-close-job-path-parms]] +==== {api-path-parms-title} `job_id`:: (string) Identifier for the job. It can be a job identifier, a group name, or a wildcard expression. - -==== Query Parameters +[[ml-close-job-query-parms]] +==== {api-query-parms-title} `force`:: (boolean) Use to close a failed job, or to forcefully close a job which has not @@ -65,14 +67,14 @@ results the job might have recently produced or might produce in the future. (time units) Controls the time to wait until a job has closed. The default value is 30 minutes. - -==== Authorization +[[ml-close-job-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. +For more information, see {stack-ov}/security-privileges.html[Security privileges]. 
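Because `force` and `timeout` are query parameters, a forced close is a single request. A sketch, using the `total-requests` job from the example below:

[source,js]
--------------------------------------------------
POST _ml/anomaly_detectors/total-requests/_close?force=true
--------------------------------------------------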
- -==== Examples +[[ml-close-job-example]] +==== {api-examples-title} The following example closes the `total-requests` job: diff --git a/docs/reference/ml/apis/delete-calendar-event.asciidoc b/docs/reference/ml/apis/delete-calendar-event.asciidoc index 68f7a073837..bc99398991b 100644 --- a/docs/reference/ml/apis/delete-calendar-event.asciidoc +++ b/docs/reference/ml/apis/delete-calendar-event.asciidoc @@ -8,19 +8,20 @@ Deletes scheduled events from a calendar. - -==== Request +[[ml-delete-calendar-event-request]] +==== {api-request-title} `DELETE _ml/calendars//events/` - -==== Description +[[ml-delete-calendar-event-desc]] +==== {api-description-title} This API removes individual events from a calendar. To remove all scheduled events and delete the calendar, see the <>. -==== Path Parameters +[[ml-delete-calendar-event-path-parms]] +==== {api-path-parms-title} `calendar_id`(required):: (string) Identifier for the calendar. @@ -29,13 +30,14 @@ events and delete the calendar, see the (string) Identifier for the scheduled event. You can obtain this identifier by using the <>. - -==== Authorization +[[ml-delete-calendar-event-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. +For more information, see {stack-ov}/security-privileges.html[Security privileges]. -==== Examples +[[ml-delete-calendar-event-example]] +==== {api-examples-title} The following example deletes a scheduled event from the `planned-outages` calendar: diff --git a/docs/reference/ml/apis/delete-calendar-job.asciidoc b/docs/reference/ml/apis/delete-calendar-job.asciidoc index 118a706d294..9451734c230 100644 --- a/docs/reference/ml/apis/delete-calendar-job.asciidoc +++ b/docs/reference/ml/apis/delete-calendar-job.asciidoc @@ -8,13 +8,13 @@ Deletes jobs from a calendar. - -==== Request +[[ml-delete-calendar-job-request]] +==== {api-request-title} `DELETE _ml/calendars//jobs/` - -==== Path Parameters +[[ml-delete-calendar-job-path-parms]] +==== {api-path-parms-title} `calendar_id`(required):: (string) Identifier for the calendar. @@ -23,13 +23,14 @@ Deletes jobs from a calendar. (string) An identifier for the job. It can be a job identifier, a group name, or a comma-separated list of jobs or groups. - -==== Authorization +[[ml-delete-calendar-job-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. +For more information, see {stack-ov}/security-privileges.html[Security privileges]. -==== Examples +[[ml-delete-calendar-job-example]] +==== {api-examples-title} The following example removes the association between the `planned-outages` calendar and `total-requests` job: diff --git a/docs/reference/ml/apis/delete-calendar.asciidoc b/docs/reference/ml/apis/delete-calendar.asciidoc index 2707f3175e0..c07eb37c93d 100644 --- a/docs/reference/ml/apis/delete-calendar.asciidoc +++ b/docs/reference/ml/apis/delete-calendar.asciidoc @@ -8,31 +8,31 @@ Deletes a calendar. - -==== Request +[[ml-delete-calendar-request]] +==== {api-request-title} `DELETE _ml/calendars/` - -==== Description +[[ml-delete-calendar-desc]] +==== {api-description-title} This API removes all scheduled events from the calendar then deletes the calendar. 
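Calendar cleanup is therefore a single request. For instance, to delete the `planned-outages` calendar used in the example at the end of this section:

[source,js]
--------------------------------------------------
DELETE _ml/calendars/planned-outages
--------------------------------------------------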
- -==== Path Parameters +[[ml-delete-calendar-path-parms]] +==== {api-path-parms-title} `calendar_id` (required):: (string) Identifier for the calendar. - -==== Authorization +[[ml-delete-calendar-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. +For more information, see {stack-ov}/security-privileges.html[Security privileges]. - -==== Examples +[[ml-delete-calendar-example]] +==== {api-examples-title} The following example deletes the `planned-outages` calendar: diff --git a/docs/reference/ml/apis/delete-datafeed.asciidoc b/docs/reference/ml/apis/delete-datafeed.asciidoc index d2a7845be75..9686959427d 100644 --- a/docs/reference/ml/apis/delete-datafeed.asciidoc +++ b/docs/reference/ml/apis/delete-datafeed.asciidoc @@ -10,38 +10,37 @@ Deletes an existing {dfeed}. - -==== Request +[[ml-delete-datafeed-request]] +==== {api-request-title} `DELETE _ml/datafeeds/` - -==== Description +[[ml-delete-datafeed-desc]] +==== {api-description-title} NOTE: Unless the `force` parameter is used, the {dfeed} must be stopped before it can be deleted. - -==== Path Parameters +[[ml-delete-datafeed-path-parms]] +==== {api-path-parms-title} `feed_id` (required):: (string) Identifier for the {dfeed} - -===== Query Parameters +[[ml-delete-datafeed-query-parms]] +==== {api-query-parms-title} `force`:: (boolean) Use to forcefully delete a started {dfeed}; this method is quicker than stopping and deleting the {dfeed}. - -===== Authorization +[[ml-delete-datafeed-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. -//<>. +For more information, see {stack-ov}/security-privileges.html[Security privileges]. - -==== Examples +[[ml-delete-datafeed-example]] +==== {api-examples-title} The following example deletes the `datafeed-total-requests` {dfeed}: diff --git a/docs/reference/ml/apis/delete-expired-data.asciidoc b/docs/reference/ml/apis/delete-expired-data.asciidoc index 8814a168673..56ca1871329 100644 --- a/docs/reference/ml/apis/delete-expired-data.asciidoc +++ b/docs/reference/ml/apis/delete-expired-data.asciidoc @@ -8,25 +8,28 @@ Deletes expired and unused machine learning data. -==== Request +[[ml-delete-expired-data-request]] +==== {api-request-title} `DELETE _ml/_delete_expired_data` -==== Description +[[ml-delete-expired-data-desc]] +==== {api-description-title} Deletes all job results, model snapshots and forecast data that have exceeded their `retention days` period. Machine learning state documents that are not associated with any job are also deleted. -==== Authorization +[[ml-delete-expired-data-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see {stack-ov}/security-privileges.html[Security Privileges] and {stack-ov}/built-in-roles.html[Built-in Roles]. - -==== Examples +[[ml-delete-expired-data-example]] +==== {api-examples-title} The endpoint takes no arguments: diff --git a/docs/reference/ml/apis/delete-filter.asciidoc b/docs/reference/ml/apis/delete-filter.asciidoc index b2cbb7ef283..8d6797448ec 100644 --- a/docs/reference/ml/apis/delete-filter.asciidoc +++ b/docs/reference/ml/apis/delete-filter.asciidoc @@ -8,32 +8,32 @@ Deletes a filter. 
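In its simplest form the request looks as follows, using the `safe_domains` filter from the example at the end of this section:

[source,js]
--------------------------------------------------
DELETE _ml/filters/safe_domains
--------------------------------------------------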
- -==== Request +[[ml-delete-filter-request]] +==== {api-request-title} `DELETE _ml/filters/` - -==== Description +[[ml-delete-filter-desc]] +==== {api-description-title} This API deletes a {stack-ov}/ml-rules.html[filter]. If a {ml} job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter. - -==== Path Parameters +[[ml-delete-filter-path-parms]] +==== {api-path-parms-title} `filter_id` (required):: (string) Identifier for the filter. - -==== Authorization +[[ml-delete-filter-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. - -==== Examples +[[ml-delete-filter-example]] +==== {api-examples-title} The following example deletes the `safe_domains` filter: diff --git a/docs/reference/ml/apis/delete-forecast.asciidoc b/docs/reference/ml/apis/delete-forecast.asciidoc index 133b9105e47..8332d07f840 100644 --- a/docs/reference/ml/apis/delete-forecast.asciidoc +++ b/docs/reference/ml/apis/delete-forecast.asciidoc @@ -8,7 +8,8 @@ Deletes forecasts from a {ml} job. -==== Request +[[ml-delete-forecast-request]] +==== {api-request-title} `DELETE _ml/anomaly_detectors//_forecast` + @@ -16,8 +17,8 @@ Deletes forecasts from a {ml} job. `DELETE _ml/anomaly_detectors//_forecast/_all` - -==== Description +[[ml-delete-forecast-desc]] +==== {api-description-title} By default, forecasts are retained for 14 days. You can specify a different retention period with the `expires_in` parameter in the <>. The delete forecast API enables you to delete one or more forecasts before they expire. @@ -26,8 +27,8 @@ NOTE: When you delete a job its associated forecasts are deleted. For more information, see {stack-ov}/ml-overview.html#ml-forecasting[Forecasting the Future]. - -==== Path Parameters +[[ml-delete-forecast-path-parms]] +==== {api-path-parms-title} `job_id` (required):: (string) Identifier for the job. @@ -37,7 +38,8 @@ For more information, see {stack-ov}/ml-overview.html#ml-forecasting[Forecasting If you do not specify this optional parameter or if you specify `_all`, the API deletes all forecasts from the job. -==== Request Parameters +[[ml-delete-forecast-request-body]] +==== {api-request-body-title} `allow_no_forecasts`:: (boolean) Specifies whether an error occurs when there are no forecasts. In @@ -51,13 +53,14 @@ For more information, see {stack-ov}/ml-overview.html#ml-forecasting[Forecasting an error. The default value is `30s`. For more information about time units, see <>. - -==== Authorization +[[ml-delete-forecast-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see {stack-ov}/security-privileges.html[Security Privileges]. -==== Examples +[[ml-delete-forecast-example]] +==== {api-examples-title} The following example deletes all forecasts from the `total-requests` job: diff --git a/docs/reference/ml/apis/delete-job.asciidoc b/docs/reference/ml/apis/delete-job.asciidoc index a52c434f93c..94042ba3565 100644 --- a/docs/reference/ml/apis/delete-job.asciidoc +++ b/docs/reference/ml/apis/delete-job.asciidoc @@ -8,13 +8,13 @@ Deletes an existing anomaly detection job. - -==== Request +[[ml-delete-job-request]] +==== {api-request-title} `DELETE _ml/anomaly_detectors/` - -==== Description +[[ml-delete-job-desc]] +==== {api-description-title} All job configuration, model state and results are deleted. 
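Once the job is closed, deletion is a single request. A sketch, using the `total-requests` job from the example at the end of this section:

[source,js]
--------------------------------------------------
DELETE _ml/anomaly_detectors/total-requests
--------------------------------------------------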
@@ -30,12 +30,14 @@ is used the job must be closed before it can be deleted. It is not currently possible to delete multiple jobs using wildcards or a comma separated list. -==== Path Parameters +[[ml-delete-job-path-parms]] +==== {api-path-parms-title} `job_id` (required):: (string) Identifier for the job -===== Query Parameters +[[ml-delete-job-query-parms]] +==== {api-query-parms-title} `force`:: (boolean) Use to forcefully delete an opened job; this method is quicker than @@ -45,14 +47,15 @@ separated list. (boolean) Specifies whether the request should return immediately or wait until the job deletion completes. Defaults to `true`. -==== Authorization +[[ml-delete-job-prereqs]] +==== {api-prereq-title} If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see {stack-ov}/security-privileges.html[Security Privileges]. - -==== Examples +[[ml-delete-job-example]] +==== {api-examples-title} The following example deletes the `total-requests` job: diff --git a/docs/reference/ml/apis/delete-snapshot.asciidoc b/docs/reference/ml/apis/delete-snapshot.asciidoc index 18092ff8e89..461f7fb4227 100644 --- a/docs/reference/ml/apis/delete-snapshot.asciidoc +++ b/docs/reference/ml/apis/delete-snapshot.asciidoc @@ -8,19 +8,20 @@ Deletes an existing model snapshot. - -==== Request +[[ml-delete-snapshot-request]] +==== {api-request-title} `DELETE _ml/anomaly_detectors//model_snapshots/` - -==== Description +[[ml-delete-snapshot-desc]] +==== {api-description-title} IMPORTANT: You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API. -==== Path Parameters +[[ml-delete-snapshot-path-parms]] +==== {api-path-parms-title} `job_id` (required):: (string) Identifier for the job @@ -28,14 +29,14 @@ the `model_snapshot_id` in the results from the get jobs API. `snapshot_id` (required):: (string) Identifier for the model snapshot - -==== Authorization +[[ml-delete-snapshot-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. - -==== Examples +[[ml-delete-snapshot-example]] +==== {api-examples-title} The following example deletes the `1491948163` snapshot: diff --git a/docs/reference/ml/apis/find-file-structure.asciidoc b/docs/reference/ml/apis/find-file-structure.asciidoc index f3029635bf4..ead3087f3d8 100644 --- a/docs/reference/ml/apis/find-file-structure.asciidoc +++ b/docs/reference/ml/apis/find-file-structure.asciidoc @@ -11,12 +11,13 @@ experimental[] Finds the structure of a text file. The text file must contain data that is suitable to be ingested into {es}. -==== Request +[[ml-find-file-structure-request]] +==== {api-request-title} `POST _ml/find_file_structure` - -==== Description +[[ml-find-file-structure-desc]] +==== {api-description-title} This API provides a starting point for ingesting data into {es} in a format that is suitable for subsequent use with other {ml} functionality. @@ -47,7 +48,8 @@ specify the `explain` query parameter. It causes an `explanation` to appear in the response, which should help in determining why the returned structure was chosen. -==== Query Parameters +[[ml-find-file-structure-query-parms]] +==== {api-query-parms-title} `charset`:: (string) The file's character set. 
It must be a character set that is supported @@ -197,22 +199,23 @@ format from a built-in set. -- -==== Request Body +[[ml-find-file-structure-request-body]] +==== {api-request-body-title} The text file that you want to analyze. It must contain data that is suitable to be ingested into {es}. It does not need to be in JSON format and it does not need to be UTF-8 encoded. The size is limited to the {es} HTTP receive buffer size, which defaults to 100 Mb. - -==== Authorization +[[ml-find-file-structure-prereqs]] +==== {api-prereq-title} You must have `monitor_ml`, or `monitor` cluster privileges to use this API. For more information, see {stack-ov}/security-privileges.html[Security Privileges]. [[ml-find-file-structure-examples]] -==== Examples +==== {api-examples-title} Suppose you have a newline-delimited JSON file that contains information about some books. You can send the contents to the `find_file_structure` endpoint: diff --git a/docs/reference/ml/apis/flush-job.asciidoc b/docs/reference/ml/apis/flush-job.asciidoc index e2793b2c1a1..6598f8155b9 100644 --- a/docs/reference/ml/apis/flush-job.asciidoc +++ b/docs/reference/ml/apis/flush-job.asciidoc @@ -8,13 +8,13 @@ Forces any buffered data to be processed by the job. - -==== Request +[[ml-flush-job-request]] +==== {api-request-title} `POST _ml/anomaly_detectors//_flush` - -==== Description +[[ml-flush-job-desc]] +==== {api-description-title} The flush jobs API is only applicable when sending data for analysis using the <>. Depending on the content of the buffer, then it @@ -26,14 +26,14 @@ remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data. - -==== Path Parameters +[[ml-flush-job-path-parms]] +==== {api-path-parms-title} `job_id` (required):: (string) Identifier for the job - -==== Query Parameters +[[ml-flush-job-query-parms]] +==== {api-query-parms-title} `advance_time`:: (string) Specifies to advance to a particular time value. Results are @@ -56,14 +56,14 @@ opened again before analyzing further data. (string) When used in conjunction with `calc_interim`, specifies the range of buckets on which to calculate interim results. - -==== Authorization +[[ml-flush-job-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. - -==== Examples +[[ml-flush-job-example]] +==== {api-examples-title} The following example flushes the `total-requests` job: diff --git a/docs/reference/ml/apis/forecast.asciidoc b/docs/reference/ml/apis/forecast.asciidoc index 71a7e1db2b1..05bd250975d 100644 --- a/docs/reference/ml/apis/forecast.asciidoc +++ b/docs/reference/ml/apis/forecast.asciidoc @@ -8,14 +8,15 @@ Predicts the future behavior of a time series by using its historical behavior. -==== Request +[[ml-forecast-request]] +==== {api-request-title} `POST _ml/anomaly_detectors//_forecast` +[[ml-forecast-desc]] +==== {api-description-title} -==== Description - -See {xpack-ref}/ml-overview.html#ml-forecasting[Forecasting the Future]. +See {stack-ov}/ml-overview.html#ml-forecasting[Forecasting the Future]. [NOTE] =============================== @@ -25,13 +26,14 @@ forecast. For more information about this property, see <>. * The job must be open when you create a forecast. Otherwise, an error occurs. 
=============================== -==== Path Parameters +[[ml-forecast-path-parms]] +==== {api-path-parms-title} `job_id`:: (string) Identifier for the job. - -==== Request Parameters +[[ml-forecast-request-body]] +==== {api-request-body-title} `duration`:: (time units) A period of time that indicates how far into the future to @@ -45,13 +47,14 @@ forecast. For more information about this property, see <>. If set to a value of `0`, the forecast is never automatically deleted. For more information about time units, see <>. -==== Authorization +[[ml-forecast-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. - -==== Examples +[[ml-forecast-example]] +==== {api-examples-title} The following example requests a 10 day forecast for the `total-requests` job: diff --git a/docs/reference/ml/apis/get-bucket.asciidoc b/docs/reference/ml/apis/get-bucket.asciidoc index 39c548dd64e..0e2b7988e8e 100644 --- a/docs/reference/ml/apis/get-bucket.asciidoc +++ b/docs/reference/ml/apis/get-bucket.asciidoc @@ -8,21 +8,21 @@ Retrieves job results for one or more buckets. - -==== Request +[[ml-get-bucket-request]] +==== {api-request-title} `GET _ml/anomaly_detectors//results/buckets` + `GET _ml/anomaly_detectors//results/buckets/` - -==== Description +[[ml-get-bucket-desc]] +==== {api-description-title} The get buckets API presents a chronological view of the records, grouped by bucket. - -==== Path Parameters +[[ml-get-bucket-path-parms]] +==== {api-path-parms-title} `job_id`:: (string) Identifier for the job @@ -32,8 +32,8 @@ bucket. If you do not specify this optional parameter, the API returns information about all buckets. - -==== Request Body +[[ml-get-bucket-request-body]] +==== {api-request-body-title} `anomaly_score`:: (double) Returns buckets with anomaly scores greater or equal than this value. @@ -64,8 +64,8 @@ bucket. `start`:: (string) Returns buckets with timestamps after this time. - -===== Results +[[ml-get-bucket-results]] +==== {api-response-body-title} The API returns the following information: @@ -73,18 +73,18 @@ The API returns the following information: (array) An array of bucket objects. For more information, see <>. - -==== Authorization +[[ml-get-bucket-prereqs]] +==== {api-prereq-title} You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also need `read` index privilege on the index that stores the results. The `machine_learning_admin` and `machine_learning_user` roles provide these privileges. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges] and -{xpack-ref}/built-in-roles.html[Built-in Roles]. +{stack-ov}/security-privileges.html[Security Privileges] and +{stack-ov}/built-in-roles.html[Built-in Roles]. - -==== Examples +[[ml-get-bucket-example]] +==== {api-examples-title} The following example gets bucket information for the `it-ops-kpi` job: diff --git a/docs/reference/ml/apis/get-calendar-event.asciidoc b/docs/reference/ml/apis/get-calendar-event.asciidoc index a890f67db0d..1ee94eff7b5 100644 --- a/docs/reference/ml/apis/get-calendar-event.asciidoc +++ b/docs/reference/ml/apis/get-calendar-event.asciidoc @@ -9,25 +9,27 @@ Retrieves information about the scheduled events in calendars. 
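A sketch of the simplest request, using the `planned-outages` calendar from the example at the end of this section:

[source,js]
--------------------------------------------------
GET _ml/calendars/planned-outages/events
--------------------------------------------------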
- -==== Request +[[ml-get-calendar-event-request]] +==== {api-request-title} `GET _ml/calendars//events` + `GET _ml/calendars/_all/events` - -===== Description +[[ml-get-calendar-event-desc]] +==== {api-description-title} You can get scheduled event information for a single calendar or for all calendars by using `_all`. -==== Path Parameters +[[ml-get-calendar-event-path-parms]] +==== {api-path-parms-title} `calendar_id` (required):: (string) Identifier for the calendar. -==== Request Body +[[ml-get-calendar-event-request-body]] +==== {api-request-body-title} `end`:: (string) Specifies to get events with timestamps earlier than this time. @@ -41,7 +43,8 @@ calendars by using `_all`. `start`:: (string) Specifies to get events with timestamps after this time. -==== Results +[[ml-get-calendar-event-results]] +==== {api-response-body-title} The API returns the following information: @@ -49,15 +52,15 @@ The API returns the following information: (array) An array of scheduled event resources. For more information, see <>. - -==== Authorization +[[ml-get-calendar-event-prereqs]] +==== {api-prereq-title} You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +{stack-ov}/security-privileges.html[Security Privileges]. - -==== Examples +[[ml-get-calendar-event-example]] +==== {api-examples-title} The following example gets information about the scheduled events in the `planned-outages` calendar: diff --git a/docs/reference/ml/apis/get-calendar.asciidoc b/docs/reference/ml/apis/get-calendar.asciidoc index 09e429b1f6d..1ff9f8442c2 100644 --- a/docs/reference/ml/apis/get-calendar.asciidoc +++ b/docs/reference/ml/apis/get-calendar.asciidoc @@ -8,27 +8,27 @@ Retrieves configuration information for calendars. - -==== Request +[[ml-get-calendar-request]] +==== {api-request-title} `GET _ml/calendars/` + `GET _ml/calendars/_all` - -===== Description +[[ml-get-calendar-desc]] +==== {api-description-title} You can get information for a single calendar or for all calendars by using `_all`. - -==== Path Parameters +[[ml-get-calendar-path-parms]] +==== {api-path-parms-title} `calendar_id`:: (string) Identifier for the calendar. - -==== Request Body +[[ml-get-calendar-request-body]] +==== {api-request-body-title} `page`:: `from`::: @@ -37,8 +37,8 @@ You can get information for a single calendar or for all calendars by using `size`::: (integer) Specifies the maximum number of calendars to obtain. - -==== Results +[[ml-get-calendar-results]] +==== {api-response-body-title} The API returns the following information: @@ -46,15 +46,15 @@ The API returns the following information: (array) An array of calendar resources. For more information, see <>. - -==== Authorization +[[ml-get-calendar-prereqs]] +==== {api-prereq-title} You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +{stack-ov}/security-privileges.html[Security Privileges]. 
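A minimal form of the request in the example below:

[source,js]
--------------------------------------------------
GET _ml/calendars/planned-outages
--------------------------------------------------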
- -==== Examples +[[ml-get-calendar-example]] +==== {api-examples-title} The following example gets configuration information for the `planned-outages` calendar: diff --git a/docs/reference/ml/apis/get-category.asciidoc b/docs/reference/ml/apis/get-category.asciidoc index 1fbfda20ecc..252f59c3ef2 100644 --- a/docs/reference/ml/apis/get-category.asciidoc +++ b/docs/reference/ml/apis/get-category.asciidoc @@ -8,19 +8,21 @@ Retrieves job results for one or more categories. - -==== Request +[[ml-get-category-request]] +==== {api-request-title} `GET _ml/anomaly_detectors//results/categories` + `GET _ml/anomaly_detectors//results/categories/` -==== Description +[[ml-get-category-desc]] +==== {api-description-title} For more information about categories, see -{xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages]. +{stack-ov}/ml-configuring-categories.html[Categorizing Log Messages]. -==== Path Parameters +[[ml-get-category-path-parms]] +==== {api-path-parms-title} `job_id`:: (string) Identifier for the job. @@ -29,8 +31,8 @@ For more information about categories, see (long) Identifier for the category. If you do not specify this optional parameter, the API returns information about all categories in the job. - -==== Request Body +[[ml-get-category-request-body]] +==== {api-request-body-title} `page`:: `from`::: @@ -38,8 +40,8 @@ For more information about categories, see `size`::: (integer) Specifies the maximum number of categories to obtain. - -==== Results +[[ml-get-category-results]] +==== {api-response-body-title} The API returns the following information: @@ -47,18 +49,18 @@ The API returns the following information: (array) An array of category objects. For more information, see <>. - -==== Authorization +[[ml-get-category-prereqs]] +==== {api-prereq-title} You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also need `read` index privilege on the index that stores the results. The `machine_learning_admin` and `machine_learning_user` roles provide these privileges. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges] and -{xpack-ref}/built-in-roles.html[Built-in Roles]. +{stack-ov}/security-privileges.html[Security Privileges] and +{stack-ov}/built-in-roles.html[Built-in Roles]. - -==== Examples +[[ml-get-category-example]] +==== {api-examples-title} The following example gets information about one category for the `esxi_log` job: diff --git a/docs/reference/ml/apis/get-datafeed-stats.asciidoc b/docs/reference/ml/apis/get-datafeed-stats.asciidoc index d1b842509b4..1789478e081 100644 --- a/docs/reference/ml/apis/get-datafeed-stats.asciidoc +++ b/docs/reference/ml/apis/get-datafeed-stats.asciidoc @@ -10,9 +10,8 @@ Retrieves usage information for {dfeeds}. - -==== Request - +[[ml-get-datafeed-stats-request]] +==== {api-request-title} `GET _ml/datafeeds//_stats` + @@ -22,9 +21,8 @@ Retrieves usage information for {dfeeds}. `GET _ml/datafeeds/_all/_stats` + - - -==== Description +[[ml-get-datafeed-stats-desc]] +==== {api-description-title} You can get statistics for multiple {dfeeds} in a single API request by using a comma-separated list of {dfeeds} or a wildcard expression. You can get @@ -36,15 +34,16 @@ If the {dfeed} is stopped, the only information you receive is the IMPORTANT: This API returns a maximum of 10,000 {dfeeds}. -==== Path Parameters +[[ml-get-datafeed-stats-path-parms]] +==== {api-path-parms-title} `feed_id`:: (string) Identifier for the {dfeed}. 
It can be a {dfeed} identifier or a wildcard expression. If you do not specify one of these options, the API returns statistics for all {dfeeds}. - -==== Results +[[ml-get-datafeed-stats-results]] +==== {api-response-body-title} The API returns the following information: @@ -52,15 +51,15 @@ The API returns the following information: (array) An array of {dfeed} count objects. For more information, see <>. - -==== Authorization +[[ml-get-datafeed-stats-prereqs]] +==== {api-prereq-title} You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +{stack-ov}/security-privileges.html[Security Privileges]. - -==== Examples +[[ml-get-datafeed-stats-example]] +==== {api-examples-title} The following example gets usage information for the `datafeed-total-requests` {dfeed}: diff --git a/docs/reference/ml/apis/get-datafeed.asciidoc b/docs/reference/ml/apis/get-datafeed.asciidoc index 2c9ef7e3aec..8cb08818277 100644 --- a/docs/reference/ml/apis/get-datafeed.asciidoc +++ b/docs/reference/ml/apis/get-datafeed.asciidoc @@ -10,8 +10,8 @@ Retrieves configuration information for {dfeeds}. -==== Request - +[[ml-get-datafeed-request]] +==== {api-request-title} `GET _ml/datafeeds/` + @@ -21,8 +21,8 @@ Retrieves configuration information for {dfeeds}. `GET _ml/datafeeds/_all` + - -===== Description +[[ml-get-datafeed-desc]] +==== {api-description-title} You can get information for multiple {dfeeds} in a single API request by using a comma-separated list of {dfeeds} or a wildcard expression. You can get @@ -31,15 +31,16 @@ information for all {dfeeds} by using `_all`, by specifying `*` as the IMPORTANT: This API returns a maximum of 10,000 {dfeeds}. -==== Path Parameters +[[ml-get-datafeed-path-parms]] +==== {api-path-parms-title} `feed_id`:: (string) Identifier for the {dfeed}. It can be a {dfeed} identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all {dfeeds}. - -==== Results +[[ml-get-datafeed-results]] +==== {api-response-body-title} The API returns the following information: @@ -47,15 +48,15 @@ The API returns the following information: (array) An array of {dfeed} objects. For more information, see <>. - -==== Authorization +[[ml-get-datafeed-prereqs]] +==== {api-prereq-title} You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +{stack-ov}/security-privileges.html[Security Privileges]. - -==== Examples +[[ml-get-datafeed-example]] +==== {api-examples-title} The following example gets configuration information for the `datafeed-total-requests` {dfeed}: diff --git a/docs/reference/ml/apis/get-filter.asciidoc b/docs/reference/ml/apis/get-filter.asciidoc index 07fc25d756a..c69b7174272 100644 --- a/docs/reference/ml/apis/get-filter.asciidoc +++ b/docs/reference/ml/apis/get-filter.asciidoc @@ -8,27 +8,27 @@ Retrieves filters. - -==== Request +[[ml-get-filter-request]] +==== {api-request-title} `GET _ml/filters/` + `GET _ml/filters/` - -===== Description +[[ml-get-filter-desc]] +==== {api-description-title} You can get a single filter or all filters. For more information, see {stack-ov}/ml-rules.html[Machine learning custom rules]. - -==== Path Parameters +[[ml-get-filter-path-parms]] +==== {api-path-parms-title} `filter_id`:: (string) Identifier for the filter. 
- -==== Request Body +[[ml-get-filter-query-parms]] +==== {api-query-parms-title} `from`::: (integer) Skips the specified number of filters. @@ -36,8 +36,8 @@ You can get a single filter or all filters. For more information, see `size`::: (integer) Specifies the maximum number of filters to obtain. - -==== Results +[[ml-get-filter-results]] +==== {api-response-body-title} The API returns the following information: @@ -45,15 +45,15 @@ The API returns the following information: (array) An array of filter resources. For more information, see <>. - -==== Authorization +[[ml-get-filter-prereqs]] +==== {api-prereq-title} You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +{stack-ov}/security-privileges.html[Security Privileges]. - -==== Examples +[[ml-get-filter-example]] +==== {api-examples-title} The following example gets configuration information for the `safe_domains` filter: diff --git a/docs/reference/ml/apis/get-influencer.asciidoc b/docs/reference/ml/apis/get-influencer.asciidoc index 7425a734ed4..fedcac20792 100644 --- a/docs/reference/ml/apis/get-influencer.asciidoc +++ b/docs/reference/ml/apis/get-influencer.asciidoc @@ -8,19 +8,19 @@ Retrieves job results for one or more influencers. - -==== Request +[[ml-get-influencer-request]] +==== {api-request-title} `GET _ml/anomaly_detectors//results/influencers` -//===== Description - -==== Path Parameters +[[ml-get-influencer-path-parms]] +==== {api-path-parms-title} `job_id`:: (string) Identifier for the job. -==== Request Body +[[ml-get-influencer-request-body]] +==== {api-request-body-title} `desc`:: (boolean) If true, the results are sorted in descending order. @@ -48,8 +48,8 @@ Retrieves job results for one or more influencers. `start`:: (string) Returns influencers with timestamps after this time. - -==== Results +[[ml-get-influencer-results]] +==== {api-response-body-title} The API returns the following information: @@ -57,19 +57,18 @@ The API returns the following information: (array) An array of influencer objects. For more information, see <>. - -==== Authorization +[[ml-get-influencer-prereqs]] +==== {api-prereq-title} You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also need `read` index privilege on the index that stores the results. The `machine_learning_admin` and `machine_learning_user` roles provide these privileges. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges] and -{xpack-ref}/built-in-roles.html[Built-in Roles]. -//<> and <>. +{stack-ov}/security-privileges.html[Security Privileges] and +{stack-ov}/built-in-roles.html[Built-in Roles]. - -==== Examples +[[ml-get-influencer-example]] +==== {api-examples-title} The following example gets influencer information for the `it_ops_new_kpi` job: diff --git a/docs/reference/ml/apis/get-job-stats.asciidoc b/docs/reference/ml/apis/get-job-stats.asciidoc index f3a3207c1a0..4b32b11abf8 100644 --- a/docs/reference/ml/apis/get-job-stats.asciidoc +++ b/docs/reference/ml/apis/get-job-stats.asciidoc @@ -8,10 +8,8 @@ Retrieves usage information for jobs. - -==== Request - - +[[ml-get-job-stats-request]] +==== {api-request-title} `GET _ml/anomaly_detectors//_stats` @@ -21,8 +19,8 @@ Retrieves usage information for jobs. 
`GET _ml/anomaly_detectors/_all/_stats` + - -===== Description +[[ml-get-job-stats-desc]] +==== {api-description-title} You can get statistics for multiple jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can @@ -31,16 +29,16 @@ get statistics for all jobs by using `_all`, by specifying `*` as the IMPORTANT: This API returns a maximum of 10,000 jobs. - -==== Path Parameters +[[ml-get-job-stats-path-parms]] +==== {api-path-parms-title} `job_id`:: (string) An identifier for the job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these options, the API returns statistics for all jobs. - -==== Results +[[ml-get-job-stats-results]] +==== {api-response-body-title} The API returns the following information: @@ -48,15 +46,15 @@ The API returns the following information: (array) An array of job statistics objects. For more information, see <>. - -==== Authorization +[[ml-get-job-stats-prereqs]] +==== {api-prereq-title} You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +{stack-ov}/security-privileges.html[Security Privileges]. - -==== Examples +[[ml-get-job-stats-example]] +==== {api-examples-title} The following example gets usage information for the `farequote` job: diff --git a/docs/reference/ml/apis/get-job.asciidoc b/docs/reference/ml/apis/get-job.asciidoc index 4eb7eaf5a7f..a4bbb66b5d0 100644 --- a/docs/reference/ml/apis/get-job.asciidoc +++ b/docs/reference/ml/apis/get-job.asciidoc @@ -8,8 +8,8 @@ Retrieves configuration information for jobs. - -==== Request +[[ml-get-job-request]] +==== {api-request-title} `GET _ml/anomaly_detectors/` + @@ -19,8 +19,8 @@ Retrieves configuration information for jobs. `GET _ml/anomaly_detectors/_all` - -===== Description +[[ml-get-job-desc]] +==== {api-description-title} You can get information for multiple jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can @@ -29,15 +29,16 @@ get information for all jobs by using `_all`, by specifying `*` as the IMPORTANT: This API returns a maximum of 10,000 jobs. - -==== Path Parameters +[[ml-get-job-path-parms]] +==== {api-path-parms-title} `job_id`:: (string) Identifier for the job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these options, the API returns information for all jobs. -==== Results +[[ml-get-job-results]] +==== {api-response-body-title} The API returns the following information: @@ -45,15 +46,15 @@ The API returns the following information: (array) An array of job resources. For more information, see <>. - -==== Authorization +[[ml-get-job-prereqs]] +==== {api-prereq-title} You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +{stack-ov}/security-privileges.html[Security Privileges]. 
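In its minimal form, the request in the example below is simply:

[source,js]
--------------------------------------------------
GET _ml/anomaly_detectors/total-requests
--------------------------------------------------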
- -==== Examples +[[ml-get-job-example]] +==== {api-examples-title} The following example gets configuration information for the `total-requests` job: diff --git a/docs/reference/ml/apis/get-ml-info.asciidoc b/docs/reference/ml/apis/get-ml-info.asciidoc index 41b680e1327..b60a36eed29 100644 --- a/docs/reference/ml/apis/get-ml-info.asciidoc +++ b/docs/reference/ml/apis/get-ml-info.asciidoc @@ -10,28 +10,30 @@ Returns defaults and limits used by machine learning. -==== Request +[[get-ml-info-request]] +==== {api-request-title} `GET _ml/info` -==== Description +[[get-ml-info-desc]] +==== {api-description-title} This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. - -==== Authorization +[[get-ml-info-prereqs]] +==== {api-prereq-title} You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. The `machine_learning_admin` and `machine_learning_user` roles provide these privileges. For more information, see -{stack-ov}/security-privileges.html[Security Privileges] and -{stack-ov}/built-in-roles.html[Built-in Roles]. +{stack-ov}/security-privileges.html[Security privileges] and +{stack-ov}/built-in-roles.html[Built-in roles]. - -==== Examples +[[get-ml-info-example]] +==== {api-examples-title} The endpoint takes no arguments: diff --git a/docs/reference/ml/apis/get-overall-buckets.asciidoc b/docs/reference/ml/apis/get-overall-buckets.asciidoc index d8592e6516b..81c5c371ac4 100644 --- a/docs/reference/ml/apis/get-overall-buckets.asciidoc +++ b/docs/reference/ml/apis/get-overall-buckets.asciidoc @@ -9,7 +9,8 @@ Retrieves overall bucket results that summarize the bucket results of multiple jobs. -==== Request +[[ml-get-overall-buckets-request]] +==== {api-request-title} `GET _ml/anomaly_detectors//results/overall_buckets` + @@ -17,7 +18,8 @@ bucket results of multiple jobs. `GET _ml/anomaly_detectors/_all/results/overall_buckets` -==== Description +[[ml-get-overall-buckets-desc]] +==== {api-description-title} You can summarize the bucket results for all jobs by using `_all` or by specifying `*` as the ``. @@ -41,13 +43,15 @@ to request overall buckets that span longer than the largest job's `bucket_span` When set, the `overall_score` will be the max `overall_score` of the corresponding overall buckets with a span equal to the largest job's `bucket_span`. -==== Path Parameters +[[ml-get-overall-buckets-path-parms]] +==== {api-path-parms-title} `job_id`:: (string) Identifier for the job. It can be a job identifier, a group name, a comma-separated list of jobs or groups, or a wildcard expression. -==== Request Body +[[ml-get-overall-buckets-request-body]] +==== {api-request-body-title} `allow_no_jobs`:: (boolean) If `false` and the `job_id` does not match any job an error will @@ -76,8 +80,8 @@ overall buckets with a span equal to the largest job's `bucket_span`. (integer) The number of top job bucket scores to be used in the `overall_score` calculation. The default value is `1`. - -===== Results +[[ml-get-overall-buckets-results]] +==== {api-response-body-title} The API returns the following information: @@ -85,18 +89,18 @@ The API returns the following information: (array) An array of overall bucket objects. For more information, see <>. 
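A sketch of how these parameters combine in a request body; the `job-*` wildcard comes from the example at the end of this section, the `top_n` value is illustrative, and `overall_score` acting as a minimum-score filter is assumed here:

[source,js]
--------------------------------------------------
GET _ml/anomaly_detectors/job-*/results/overall_buckets
{
  "top_n": 2,
  "overall_score": 80
}
--------------------------------------------------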
- -==== Authorization +[[ml-get-overall-buckets-prereqs]] +==== {api-prereq-title} You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also need `read` index privilege on the index that stores the results. The `machine_learning_admin` and `machine_learning_user` roles provide these privileges. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges] and -{xpack-ref}/built-in-roles.html[Built-in Roles]. +{stack-ov}/security-privileges.html[Security Privileges] and +{stack-ov}/built-in-roles.html[Built-in Roles]. - -==== Examples +[[ml-get-overall-buckets-example]] +==== {api-examples-title} The following example gets overall buckets for jobs with IDs matching `job-*`: diff --git a/docs/reference/ml/apis/get-record.asciidoc b/docs/reference/ml/apis/get-record.asciidoc index afc7d2733c8..fec36aa4a56 100644 --- a/docs/reference/ml/apis/get-record.asciidoc +++ b/docs/reference/ml/apis/get-record.asciidoc @@ -8,20 +8,19 @@ Retrieves anomaly records for a job. - -==== Request +[[ml-get-record-request]] +==== {api-request-title} `GET _ml/anomaly_detectors//results/records` -//===== Description - -==== Path Parameters +[[ml-get-record-path-parms]] +==== {api-path-parms-title} `job_id`:: (string) Identifier for the job. - -==== Request Body +[[ml-get-record-request-body]] +==== {api-request-body-title} `desc`:: (boolean) If true, the results are sorted in descending order. @@ -49,8 +48,8 @@ Retrieves anomaly records for a job. `start`:: (string) Returns records with timestamps after this time. - -==== Results +[[ml-get-record-results]] +==== {api-response-body-title} The API returns the following information: @@ -58,19 +57,18 @@ The API returns the following information: (array) An array of record objects. For more information, see <>. - -==== Authorization +[[ml-get-record-prereqs]] +==== {api-prereq-title} You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also need `read` index privilege on the index that stores the results. The `machine_learning_admin` and `machine_learning_user` roles provide these privileges. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges] and -{xpack-ref}/built-in-roles.html[Built-in Roles]. -//<> and <>. +{stack-ov}/security-privileges.html[Security privileges] and +{stack-ov}/built-in-roles.html[Built-in roles]. - -==== Examples +[[ml-get-record-example]] +==== {api-examples-title} The following example gets record information for the `it-ops-kpi` job: diff --git a/docs/reference/ml/apis/get-snapshot.asciidoc b/docs/reference/ml/apis/get-snapshot.asciidoc index 4935a6e2d23..eb5bc4354f2 100644 --- a/docs/reference/ml/apis/get-snapshot.asciidoc +++ b/docs/reference/ml/apis/get-snapshot.asciidoc @@ -8,16 +8,15 @@ Retrieves information about model snapshots. - -==== Request +[[ml-get-snapshot-request]] +==== {api-request-title} `GET _ml/anomaly_detectors//model_snapshots` + `GET _ml/anomaly_detectors//model_snapshots/` -//===== Description - -==== Path Parameters +[[ml-get-snapshot-path-parms]] +==== {api-path-parms-title} `job_id`:: (string) Identifier for the job. @@ -26,7 +25,8 @@ Retrieves information about model snapshots. (string) Identifier for the model snapshot. If you do not specify this optional parameter, the API returns information about all model snapshots. 
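For example, a request that names both the job and a specific snapshot might look like the following sketch (identifiers taken from the examples elsewhere in these docs):

[source,js]
--------------------------------------------------
GET _ml/anomaly_detectors/it_ops_new_logs/model_snapshots/1491852978
--------------------------------------------------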
-==== Request Body +[[ml-get-snapshot-request-body]] +==== {api-request-body-title} `desc`:: (boolean) If true, the results are sorted in descending order. @@ -47,8 +47,8 @@ Retrieves information about model snapshots. `start`:: (string) Returns snapshots with timestamps after this time. - -==== Results +[[ml-get-snapshot-results]] +==== {api-response-body-title} The API returns the following information: @@ -56,16 +56,15 @@ The API returns the following information: (array) An array of model snapshot objects. For more information, see <>. - -==== Authorization +[[ml-get-snapshot-prereqs]] +==== {api-prereq-title} You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. -//<>. +{stack-ov}/security-privileges.html[Security privileges]. - -==== Examples +[[ml-get-snapshot-example]] +==== {api-examples-title} The following example gets model snapshot information for the `it_ops_new_logs` job: diff --git a/docs/reference/ml/apis/open-job.asciidoc b/docs/reference/ml/apis/open-job.asciidoc index 08c7b97d9c0..4966ab9fc65 100644 --- a/docs/reference/ml/apis/open-job.asciidoc +++ b/docs/reference/ml/apis/open-job.asciidoc @@ -10,41 +10,41 @@ Opens one or more jobs. A job must be opened in order for it to be ready to receive and analyze data. A job can be opened and closed multiple times throughout its lifecycle. - -==== Request +[[ml-open-job-request]] +==== {api-request-title} `POST _ml/anomaly_detectors/{job_id}/_open` - -==== Description +[[ml-open-job-desc]] +==== {api-description-title} When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received. - -==== Path Parameters +[[ml-open-job-path-parms]] +==== {api-path-parms-title} `job_id` (required):: (string) Identifier for the job - -==== Request Body +[[ml-open-job-request-body]] +==== {api-request-body-title} `timeout`:: (time) Controls the time to wait until a job has opened. The default value is 30 minutes. - -==== Authorization +[[ml-open-job-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +{stack-ov}/security-privileges.html[Security privileges]. - -==== Examples +[[ml-open-job-example]] +==== {api-examples-title} The following example opens the `total-requests` job and sets an optional property: diff --git a/docs/reference/ml/apis/post-calendar-event.asciidoc b/docs/reference/ml/apis/post-calendar-event.asciidoc index 5d122a5d6d1..1a3614045ea 100644 --- a/docs/reference/ml/apis/post-calendar-event.asciidoc +++ b/docs/reference/ml/apis/post-calendar-event.asciidoc @@ -8,38 +8,40 @@ Posts scheduled events in a calendar. -==== Request +[[ml-post-calendar-event-request]] +==== {api-request-title} `POST _ml/calendars//events` +[[ml-post-calendar-event-desc]] +==== {api-description-title} -==== Description - -This API accepts a list of {xpack-ref}/ml-calendars.html[scheduled events], each +This API accepts a list of {stack-ov}/ml-calendars.html[scheduled events], each of which must have a start time, end time, and description. -==== Path Parameters +[[ml-post-calendar-event-path-parms]] +==== {api-path-parms-title} `calendar_id` (required):: (string) Identifier for the calendar. 
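Combined with the event list described in the request body section below, a complete request might look like this sketch (the calendar identifier comes from the example later on this page; the times are hypothetical epoch milliseconds):

[source,js]
--------------------------------------------------
POST _ml/calendars/planned-outages/events
{
  "events": [
    {
      "description": "event 1",
      "start_time": 1513641600000,
      "end_time": 1513728000000
    }
  ]
}
--------------------------------------------------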
- -==== Request Body +[[ml-post-calendar-event-request-body]] +==== {api-request-body-title} `events`:: (array) A list of one of more scheduled events. The event's start and end times may be specified as integer milliseconds since the epoch or as a string in ISO 8601 format. See <>. - -==== Authorization +[[ml-post-calendar-event-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +{stack-ov}/security-privileges.html[Security privileges]. - -==== Examples +[[ml-post-calendar-event-example]] +==== {api-examples-title} You can add scheduled events to the `planned-outages` calendar as follows: diff --git a/docs/reference/ml/apis/post-data.asciidoc b/docs/reference/ml/apis/post-data.asciidoc index 2df0df69e90..39fb048d8b4 100644 --- a/docs/reference/ml/apis/post-data.asciidoc +++ b/docs/reference/ml/apis/post-data.asciidoc @@ -8,13 +8,13 @@ Sends data to an anomaly detection job for analysis. - -==== Request +[[ml-post-data-request]] +==== {api-request-title} `POST _ml/anomaly_detectors//_data` - -==== Description +[[ml-post-data-desc]] +==== {api-description-title} The job must have a state of `open` to receive and process the data. @@ -42,14 +42,14 @@ IMPORTANT: For each job, data can only be accepted from a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list. - -==== Path Parameters +[[ml-post-data-path-parms]] +==== {api-path-parms-title} `job_id` (required):: (string) Identifier for the job - -==== Query Parameters +[[ml-post-data-query-parms]] +==== {api-query-parms-title} `reset_start`:: (string) Specifies the start of the bucket resetting range @@ -57,22 +57,21 @@ or a comma-separated list. `reset_end`:: (string) Specifies the end of the bucket resetting range - -==== Request Body +[[ml-post-data-request-body]] +==== {api-request-body-title} A sequence of one or more JSON documents containing the data to be analyzed. Only whitespace characters are permitted in between the documents. - -==== Authorization +[[ml-post-data-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. -//<>. +{stack-ov}/security-privileges.html[Security privileges]. - -==== Examples +[[ml-post-data-example]] +==== {api-examples-title} The following example posts data from the it_ops_new_kpi.json file to the `it_ops_new_kpi` job: diff --git a/docs/reference/ml/apis/preview-datafeed.asciidoc b/docs/reference/ml/apis/preview-datafeed.asciidoc index 83af6a78057..cfffe96b3de 100644 --- a/docs/reference/ml/apis/preview-datafeed.asciidoc +++ b/docs/reference/ml/apis/preview-datafeed.asciidoc @@ -10,33 +10,33 @@ Previews a {dfeed}. - -==== Request +[[ml-preview-datafeed-request]] +==== {api-request-title} `GET _ml/datafeeds//_preview` - -==== Description +[[ml-preview-datafeed-desc]] +==== {api-description-title} The preview {dfeeds} API returns the first "page" of results from the `search` that is created by using the current {dfeed} settings. This preview shows the structure of the data that will be passed to the anomaly detection engine. 
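Because the preview simply runs the {dfeed}'s configured search, the request needs no body, for example:

[source,js]
--------------------------------------------------
GET _ml/datafeeds/datafeed-farequote/_preview
--------------------------------------------------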
- -==== Path Parameters +[[ml-preview-datafeed-path-parms]] +==== {api-path-parms-title} `datafeed_id` (required):: (string) Identifier for the {dfeed} - -==== Authorization +[[ml-preview-datafeed-prereqs]] +==== {api-prereq-title} If {es} {security-features} are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security Privileges]. - +{stack-ov}/security-privileges.html[Security privileges]. +[[ml-preview-datafeed-security]] ==== Security Integration When {es} {security-features} are enabled, the {dfeed} query is previewed using @@ -47,8 +47,8 @@ not accurately reflect what the {dfeed} will return when started. To avoid such problems, the same user that creates/updates the {dfeed} should preview it to ensure it is returning the expected data. - -==== Examples +[[ml-preview-datafeed-example]] +==== {api-examples-title} The following example obtains a preview of the `datafeed-farequote` {dfeed}: diff --git a/docs/reference/ml/apis/put-calendar-job.asciidoc b/docs/reference/ml/apis/put-calendar-job.asciidoc index cafc5f67062..abf124c8a11 100644 --- a/docs/reference/ml/apis/put-calendar-job.asciidoc +++ b/docs/reference/ml/apis/put-calendar-job.asciidoc @@ -8,12 +8,13 @@ Adds a job to a calendar. -==== Request +[[ml-put-calendar-job-request]] +==== {api-request-title} `PUT _ml/calendars//jobs/` - -==== Path Parameters +[[ml-put-calendar-job-path-parms]] +==== {api-path-parms-title} `calendar_id` (required):: (string) Identifier for the calendar. @@ -22,14 +23,15 @@ Adds a job to a calendar. (string) An identifier for the job. It can be a job identifier, a group name, or a comma-separated list of jobs or groups. -==== Authorization +[[ml-put-calendar-job-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +{stack-ov}/security-privileges.html[Security Privileges]. - -==== Examples +[[ml-put-calendar-job-example]] +==== {api-examples-title} The following example associates the `planned-outages` calendar with the `total-requests` job: diff --git a/docs/reference/ml/apis/put-calendar.asciidoc b/docs/reference/ml/apis/put-calendar.asciidoc index 9b1e781e3cc..b7ea586a106 100644 --- a/docs/reference/ml/apis/put-calendar.asciidoc +++ b/docs/reference/ml/apis/put-calendar.asciidoc @@ -8,35 +8,38 @@ Instantiates a calendar. -==== Request +[[ml-put-calendar-request]] +==== {api-request-title} `PUT _ml/calendars/` -===== Description +[[ml-put-calendar-desc]] +==== {api-description-title} For more information, see -{xpack-ref}/ml-calendars.html[Calendars and Scheduled Events]. +{stack-ov}/ml-calendars.html[Calendars and Scheduled Events]. -==== Path Parameters +[[ml-put-calendar-path-parms]] +==== {api-path-parms-title} `calendar_id` (required):: (string) Identifier for the calendar. - -==== Request Body +[[ml-put-calendar-request-body]] +==== {api-request-body-title} `description`:: (string) A description of the calendar. - -==== Authorization +[[ml-put-calendar-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +{stack-ov}/security-privileges.html[Security privileges]. 
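A sketch of creating a calendar with the optional `description` field (the description text is illustrative):

[source,js]
--------------------------------------------------
PUT _ml/calendars/planned-outages
{
  "description": "Planned outages for system maintenance"
}
--------------------------------------------------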
- -==== Examples +[[ml-put-calendar-example]] +==== {api-examples-title} The following example creates the `planned-outages` calendar: diff --git a/docs/reference/ml/apis/put-datafeed.asciidoc b/docs/reference/ml/apis/put-datafeed.asciidoc index 2e0f6700191..428af146b4d 100644 --- a/docs/reference/ml/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/apis/put-datafeed.asciidoc @@ -10,13 +10,13 @@ Instantiates a {dfeed}. - -==== Request +[[ml-put-datafeed-request]] +==== {api-request-title} `PUT _ml/datafeeds/` - -==== Description +[[ml-put-datafeed-desc]] +==== {api-description-title} You must create a job before you create a {dfeed}. You can associate only one {dfeed} to each job. @@ -26,16 +26,16 @@ IMPORTANT: You must use {kib} or this API to create a {dfeed}. Do not put a {df If {es} {security-features} are enabled, do not give users `write` privileges on the `.ml-config` index. - -==== Path Parameters +[[ml-put-datafeed-path-parms]] +==== {api-path-parms-title} `feed_id` (required):: (string) A numerical character string that uniquely identifies the {dfeed}. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - -==== Request Body +[[ml-put-datafeed-request-body]] +==== {api-request-body-title} `aggregations`:: (object) If set, the {dfeed} performs aggregation searches. @@ -90,22 +90,22 @@ IMPORTANT: You must use {kib} or this API to create a {dfeed}. Do not put a {df For more information about these properties, see <>. - -==== Authorization +[[ml-put-datafeed-prereqs]] +==== {api-prereq-title} If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security Privileges]. - +{stack-ov}/security-privileges.html[Security privileges]. +[[ml-put-datafeed-security]] ==== Security integration When {es} {security-features} are enabled, your {dfeed} remembers which roles the user who created it had at the time of creation and runs the query using those same roles. - -==== Examples +[[ml-put-datafeed-example]] +==== {api-examples-title} The following example creates the `datafeed-total-requests` {dfeed}: diff --git a/docs/reference/ml/apis/put-filter.asciidoc b/docs/reference/ml/apis/put-filter.asciidoc index abe52dfb13b..61ed24f4d5b 100644 --- a/docs/reference/ml/apis/put-filter.asciidoc +++ b/docs/reference/ml/apis/put-filter.asciidoc @@ -8,23 +8,26 @@ Instantiates a filter. -==== Request +[[ml-put-filter-request]] +==== {api-request-title} `PUT _ml/filters/` -===== Description +[[ml-put-filter-desc]] +==== {api-description-title} A {stack-ov}/ml-rules.html[filter] contains a list of strings. It can be used by one or more jobs. Specifically, filters are referenced in the `custom_rules` property of <>. -==== Path Parameters +[[ml-put-filter-path-parms]] +==== {api-path-parms-title} `filter_id` (required):: (string) Identifier for the filter. - -==== Request Body +[[ml-put-filter-request-body]] +==== {api-request-body-title} `description`:: (string) A description of the filter. @@ -35,15 +38,15 @@ the `custom_rules` property of <` -===== Description +[[ml-put-job-desc]] +==== {api-description-title} IMPORTANT: You must use {kib} or this API to create a {ml} job. Do not put a job directly to the `.ml-config` index using the Elasticsearch index API. If {es} {security-features} are enabled, do not give users `write` privileges on the `.ml-config` index. 
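Ahead of the parameter reference below, a minimal job creation sketch (names and settings are illustrative):

[source,js]
--------------------------------------------------
PUT _ml/anomaly_detectors/total-requests
{
  "description": "Total sum of requests",
  "analysis_config": {
    "bucket_span": "10m",
    "detectors": [
      {
        "detector_description": "Sum of total",
        "function": "sum",
        "field_name": "total"
      }
    ]
  },
  "data_description": {
    "time_field": "timestamp",
    "time_format": "epoch_ms"
  }
}
--------------------------------------------------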
- -==== Path Parameters +[[ml-put-job-path-parms]] +==== {api-path-parms-title} `job_id` (required):: (string) Identifier for the job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - -==== Request Body +[[ml-put-job-request-body]] +==== {api-request-body-title} `analysis_config`:: (object) The analysis configuration, which specifies how to analyze the data. @@ -78,14 +80,15 @@ IMPORTANT: You must use {kib} or this API to create a {ml} job. Do not put a job (long) Advanced configuration option. The number of days for which job results are retained. See <>. -==== Authorization +[[ml-put-job-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +{stack-ov}/security-privileges.html[Security privileges]. - -==== Examples +[[ml-put-job-example]] +==== {api-examples-title} The following example creates the `total-requests` job: diff --git a/docs/reference/ml/apis/revert-snapshot.asciidoc b/docs/reference/ml/apis/revert-snapshot.asciidoc index b560f7b0412..f470b4ec60f 100644 --- a/docs/reference/ml/apis/revert-snapshot.asciidoc +++ b/docs/reference/ml/apis/revert-snapshot.asciidoc @@ -8,12 +8,13 @@ Reverts to a specific snapshot. -==== Request +[[ml-revert-snapshot-request]] +==== {api-request-title} `POST _ml/anomaly_detectors//model_snapshots//_revert` - -==== Description +[[ml-revert-snapshot-desc]] +==== {api-description-title} The {ml} feature in {xpack} reacts quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models @@ -25,8 +26,8 @@ Friday or a critical system failure. IMPORTANT: Before you revert to a saved snapshot, you must close the job. - -==== Path Parameters +[[ml-revert-snapshot-path-parms]] +==== {api-path-parms-title} `job_id` (required):: (string) Identifier for the job @@ -34,7 +35,8 @@ IMPORTANT: Before you revert to a saved snapshot, you must close the job. `snapshot_id` (required):: (string) Identifier for the model snapshot -==== Request Body +[[ml-revert-snapshot-request-body]] +==== {api-request-body-title} `delete_intervening_results`:: (boolean) If true, deletes the results in the time period between the @@ -45,15 +47,15 @@ NOTE: If you choose not to delete intervening results when reverting a snapshot, the job will not accept input data that is older than the current time. If you want to resend data, then delete the intervening results. - -==== Authorization +[[ml-revert-snapshot-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +{stack-ov}/security-privileges.html[Security privileges]. - -==== Examples +[[ml-revert-snapshot-example]] +==== {api-examples-title} The following example reverts to the `1491856080` snapshot for the `it_ops_new_kpi` job: diff --git a/docs/reference/ml/apis/set-upgrade-mode.asciidoc b/docs/reference/ml/apis/set-upgrade-mode.asciidoc index 5434d70d4e6..16ddbe19e59 100644 --- a/docs/reference/ml/apis/set-upgrade-mode.asciidoc +++ b/docs/reference/ml/apis/set-upgrade-mode.asciidoc @@ -9,7 +9,8 @@ Sets a cluster wide upgrade_mode setting that prepares {ml} indices for an upgrade. 
-==== Request +[[ml-set-upgrade-mode-request]] +==== {api-request-title} ////////////////////////// [source,js] @@ -25,7 +26,8 @@ POST /_ml/set_upgrade_mode?enabled=false&timeout=10m `POST _ml/set_upgrade_mode` -==== Description +[[ml-set-upgrade-mode-desc]] +==== {api-description-title} When upgrading your cluster, in some circumstances you must restart your nodes and reindex your {ml} indices. In those circumstances, there must be no {ml} jobs running. @@ -37,7 +39,6 @@ though stopping jobs is not a requirement in that case. For more information, see {stack-ref}/upgrading-elastic-stack.html[Upgrading the {stack}]. - When `enabled=true` this API temporarily halts all job and {dfeed} tasks and prohibits new job and {dfeed} tasks from starting. @@ -50,7 +51,8 @@ You can see the current value for the `upgrade_mode` setting by using the IMPORTANT: No new {ml} jobs can be opened while the `upgrade_mode` setting is `true`. -==== Query Parameters +[[ml-set-upgrade-mode-query-parms]] +==== {api-query-parms-title} `enabled`:: (boolean) When `true`, this enables `upgrade_mode`. Defaults to `false` @@ -59,14 +61,15 @@ IMPORTANT: No new {ml} jobs can be opened while the `upgrade_mode` setting is (time) The time to wait for the request to be completed. The default value is 30 seconds. -==== Authorization +[[ml-set-upgrade-mode-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see {stack-ov}/security-privileges.html[Security privileges]. - -==== Examples +[[ml-set-upgrade-mode-example]] +==== {api-examples-title} The following example enables `upgrade_mode` for the cluster: diff --git a/docs/reference/ml/apis/start-datafeed.asciidoc b/docs/reference/ml/apis/start-datafeed.asciidoc index aee237b72c8..35c632d5c41 100644 --- a/docs/reference/ml/apis/start-datafeed.asciidoc +++ b/docs/reference/ml/apis/start-datafeed.asciidoc @@ -12,11 +12,13 @@ Starts one or more {dfeeds}. A {dfeed} must be started in order to retrieve data from {es}. A {dfeed} can be started and stopped multiple times throughout its lifecycle. -==== Request +[[ml-start-datafeed-request]] +==== {api-request-title} `POST _ml/datafeeds//_start` -==== Description +[[ml-start-datafeed-desc]] +==== {api-description-title} NOTE: Before you can start a {dfeed}, the job must be open. Otherwise, an error occurs. @@ -56,13 +58,14 @@ If you specify a `start` value that is earlier than the timestamp of the latest processed record, the {dfeed} continues from 1 millisecond after the timestamp of the latest processed record. - -==== Path Parameters +[[ml-start-datafeed-path-parms]] +==== {api-path-parms-title} `feed_id` (required):: (string) Identifier for the {dfeed} -==== Request Body +[[ml-start-datafeed-request-body]] +==== {api-request-body-title} `end`:: (string) The time that the {dfeed} should end. This value is exclusive. @@ -76,22 +79,22 @@ of the latest processed record. (time) Controls the amount of time to wait until a {dfeed} starts. The default value is 20 seconds. - -==== Authorization +[[ml-start-datafeed-prereqs]] +==== {api-prereq-title} If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security Privileges]. - +{stack-ov}/security-privileges.html[Security privileges]. 
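Putting the `start` parameter described above to use, the following sketch starts a {dfeed} from a fixed timestamp (the identifier and time are illustrative):

[source,js]
--------------------------------------------------
POST _ml/datafeeds/datafeed-total-requests/_start
{
  "start": "2019-04-07T18:22:16Z"
}
--------------------------------------------------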
+[[ml-start-datafeed-security]] ==== Security integration When {es} {security-features} are enabled, your {dfeed} remembers which roles the last user to create or update it had at the time of creation/update and runs the query using those same roles. - -==== Examples +[[ml-start-datafeed-example]] +==== {api-examples-title} The following example starts the `datafeed-it-ops-kpi` {dfeed}: diff --git a/docs/reference/ml/apis/stop-datafeed.asciidoc b/docs/reference/ml/apis/stop-datafeed.asciidoc index 1489137b9db..497975f425c 100644 --- a/docs/reference/ml/apis/stop-datafeed.asciidoc +++ b/docs/reference/ml/apis/stop-datafeed.asciidoc @@ -13,7 +13,8 @@ Stops one or more {dfeeds}. A {dfeed} that is stopped ceases to retrieve data from {es}. A {dfeed} can be started and stopped multiple times throughout its lifecycle. -==== Request +[[ml-stop-datafeed-request]] +==== {api-request-title} `POST _ml/datafeeds//_stop` + @@ -21,22 +22,22 @@ A {dfeed} can be started and stopped multiple times throughout its lifecycle. `POST _ml/datafeeds/_all/_stop` - -===== Description +[[ml-stop-datafeed-desc]] +==== {api-description-title} You can stop multiple {dfeeds} in a single API request by using a comma-separated list of {dfeeds} or a wildcard expression. You can close all {dfeeds} by using `_all` or by specifying `*` as the ``. - -==== Path Parameters +[[ml-stop-datafeed-path-parms]] +==== {api-path-parms-title} `feed_id`:: (string) Identifier for the {dfeed}. It can be a {dfeed} identifier or a wildcard expression. - -==== Request Body +[[ml-stop-datafeed-request-body]] +==== {api-request-body-title} `force`:: (boolean) If true, the {dfeed} is stopped forcefully. @@ -45,15 +46,15 @@ comma-separated list of {dfeeds} or a wildcard expression. You can close all (time) Controls the amount of time to wait until a {dfeed} stops. The default value is 20 seconds. - -==== Authorization +[[ml-stop-datafeed-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +{stack-ov}/security-privileges.html[Security privileges]. - -==== Examples +[[ml-stop-datafeed-example]] +==== {api-examples-title} The following example stops the `datafeed-total-requests` {dfeed}: diff --git a/docs/reference/ml/apis/update-datafeed.asciidoc b/docs/reference/ml/apis/update-datafeed.asciidoc index 63878913c7f..9c3e56e66a6 100644 --- a/docs/reference/ml/apis/update-datafeed.asciidoc +++ b/docs/reference/ml/apis/update-datafeed.asciidoc @@ -10,21 +10,25 @@ Updates certain properties of a {dfeed}. -==== Request +[[ml-update-datafeed-request]] +==== {api-request-title} `POST _ml/datafeeds//_update` -===== Description +[[ml-update-datafeed-desc]] +==== {api-description-title} NOTE: If you update the `delayed_data_check_config` property, you must stop and start the {dfeed} for the change to be applied. -==== Path Parameters +[[ml-update-datafeed-path-parms]] +==== {api-path-parms-title} `feed_id` (required):: (string) Identifier for the {dfeed} -==== Request Body +[[ml-update-datafeed-request-body]] +==== {api-request-body-title} The following properties can be updated after the {dfeed} is created: @@ -80,22 +84,22 @@ The following properties can be updated after the {dfeed} is created: For more information about these properties, see <>. 
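For instance, the example later on this page narrows the {dfeed} query to error-level log entries; the corresponding request might look like this sketch:

[source,js]
--------------------------------------------------
POST _ml/datafeeds/datafeed-total-requests/_update
{
  "query": {
    "term": {
      "level": "error"
    }
  }
}
--------------------------------------------------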
- -==== Authorization +[[ml-update-datafeed-prereqs]] +==== {api-prereq-title} If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security Privileges]. - +{stack-ov}/security-privileges.html[Security privileges]. +[[ml-update-datafeed-security]] ==== Security Integration When {es} {security-features} are enabled, your {dfeed} remembers which roles the user who updated it had at the time of update and runs the query using those same roles. - -==== Examples +[[ml-update-datafeed-example]] +==== {api-examples-title} The following example updates the query for the `datafeed-total-requests` {dfeed} so that only log entries of error level are analyzed: diff --git a/docs/reference/ml/apis/update-filter.asciidoc b/docs/reference/ml/apis/update-filter.asciidoc index 45c294a0b8b..842808ebe55 100644 --- a/docs/reference/ml/apis/update-filter.asciidoc +++ b/docs/reference/ml/apis/update-filter.asciidoc @@ -8,18 +8,18 @@ Updates the description of a filter, adds items, or removes items. -==== Request +[[ml-update-filter-request]] +==== {api-request-title} `POST _ml/filters//_update` -//==== Description - -==== Path Parameters +[[ml-update-filter-path-parms]] +==== {api-path-parms-title} `filter_id` (required):: (string) Identifier for the filter. - +[[ml-update-filter-request-body]] ==== Request Body `description`:: @@ -31,15 +31,15 @@ Updates the description of a filter, adds items, or removes items. `remove_items`:: (array of strings) The items to remove from the filter. - -==== Authorization +[[ml-update-filter-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +{stack-ov}/security-privileges.html[Security privileges]. - -==== Examples +[[ml-update-filter-example]] +==== {api-examples-title} You can change the description, add and remove items to the `safe_domains` filter as follows: diff --git a/docs/reference/ml/apis/update-job.asciidoc b/docs/reference/ml/apis/update-job.asciidoc index 3382e7fe346..39c510bda1e 100644 --- a/docs/reference/ml/apis/update-job.asciidoc +++ b/docs/reference/ml/apis/update-job.asciidoc @@ -8,17 +8,19 @@ Updates certain properties of a job. -==== Request +[[ml-update-job-request]] +==== {api-request-title} `POST _ml/anomaly_detectors//_update` - -==== Path Parameters +[[ml-update-job-path-parms]] +==== {api-path-parms-title} `job_id` (required):: (string) Identifier for the job -==== Request Body +[[ml-update-job-request-body]] +==== {api-request-body-title} The following properties can be updated after the job is created: @@ -86,14 +88,15 @@ A detector update object has the following properties: No other detector property can be updated. -==== Authorization +[[ml-update-job-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +{stack-ov}/security-privileges.html[Security privileges]. 
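A sketch that updates two of the mutable properties listed above (the values are illustrative):

[source,js]
--------------------------------------------------
POST _ml/anomaly_detectors/total-requests/_update
{
  "description": "An updated job description",
  "results_retention_days": 60
}
--------------------------------------------------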
- -==== Examples +[[ml-update-job-example]] +==== {api-examples-title} The following example updates the `total-requests` job: diff --git a/docs/reference/ml/apis/update-snapshot.asciidoc b/docs/reference/ml/apis/update-snapshot.asciidoc index ffd38f590b1..edf9e05d867 100644 --- a/docs/reference/ml/apis/update-snapshot.asciidoc +++ b/docs/reference/ml/apis/update-snapshot.asciidoc @@ -8,14 +8,13 @@ Updates certain properties of a snapshot. -==== Request +[[ml-update-snapshot-request]] +==== {api-request-title} `POST _ml/anomaly_detectors//model_snapshots//_update` - -//==== Description - -==== Path Parameters +[[ml-update-snapshot-path-parms]] +==== {api-path-parms-title} `job_id` (required):: (string) Identifier for the job @@ -23,7 +22,8 @@ Updates certain properties of a snapshot. `snapshot_id` (required):: (string) Identifier for the model snapshot -==== Request Body +[[ml-update-snapshot-request-body]] +==== {api-request-body-title} The following properties can be updated after the model snapshot is created: @@ -37,16 +37,15 @@ The following properties can be updated after the model snapshot is created: Note that this snapshot will still be deleted when the job is deleted. The default value is false. - -==== Authorization +[[ml-update-snapshot-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. -//<>. +{stack-ov}/security-privileges.html[Security privileges]. - -==== Examples +[[ml-update-snapshot-example]] +==== {api-examples-title} The following example updates the snapshot identified as `1491852978`: diff --git a/docs/reference/ml/apis/validate-detector.asciidoc b/docs/reference/ml/apis/validate-detector.asciidoc index 0f9fe9902e3..a3b7ca66072 100644 --- a/docs/reference/ml/apis/validate-detector.asciidoc +++ b/docs/reference/ml/apis/validate-detector.asciidoc @@ -8,30 +8,32 @@ Validates detector configuration information. -==== Request +[[ml-valid-detector-request]] +==== {api-request-title} `POST _ml/anomaly_detectors/_validate/detector` -==== Description +[[ml-valid-detector-desc]] +==== {api-description-title} The validate detectors API enables you validate the detector configuration before you create a job. - -==== Request Body +[[ml-valid-detector-request-body]] +==== {api-request-body-title} For a list of the properties that you can specify in the body of this API, see <>. - -==== Authorization +[[ml-valid-detector-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +{stack-ov}/security-privileges.html[Security privileges]. - -==== Examples +[[ml-valid-detector-example]] +==== {api-examples-title} The following example validates detector configuration information: diff --git a/docs/reference/ml/apis/validate-job.asciidoc b/docs/reference/ml/apis/validate-job.asciidoc index 5fbfb62dd28..651e4571569 100644 --- a/docs/reference/ml/apis/validate-job.asciidoc +++ b/docs/reference/ml/apis/validate-job.asciidoc @@ -8,30 +8,32 @@ Validates job configuration information. -==== Request +[[ml-valid-job-request]] +==== {api-request-title} `POST _ml/anomaly_detectors/_validate` -==== Description +[[ml-valid-job-desc]] +==== {api-description-title} The validate jobs API enables you validate the job configuration before you create the job. 
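A sketch that validates a minimal configuration before the job is created (field values are illustrative):

[source,js]
--------------------------------------------------
POST _ml/anomaly_detectors/_validate
{
  "analysis_config": {
    "bucket_span": "10m",
    "detectors": [
      {
        "function": "sum",
        "field_name": "total"
      }
    ]
  },
  "data_description": {
    "time_field": "timestamp"
  }
}
--------------------------------------------------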
- -==== Request Body +[[ml-valid-job-request-body]] +==== {api-request-body-title} For a list of the properties that you can specify in the body of this API, see <>. - -==== Authorization +[[ml-valid-job-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +{stack-ov}/security-privileges.html[Security privileges]. - -==== Examples +[[ml-valid-job-example]] +==== {api-examples-title} The following example validates job configuration information: From 68dbbd87937344e315417c38c060b5c4e6868b58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Witek?= Date: Thu, 27 Jun 2019 18:51:48 +0200 Subject: [PATCH 13/42] Deduplicate two similar TimeUtils classes. (#43697) * Deduplicate org.elasticsearch.xpack.core.dataframe.utils.TimeUtils and org.elasticsearch.xpack.core.ml.utils.time.TimeUtils into a common class: org.elasticsearch.xpack.core.common.time.TimeUtils. * Add unit tests for parseTimeField and parseTimeFieldToInstant methods --- .../utils => common/time}/TimeUtils.java | 8 +- .../notifications/DataFrameAuditMessage.java | 2 +- .../transforms/DataFrameTransformConfig.java | 2 +- .../xpack/core/ml/annotations/Annotation.java | 2 +- .../core/ml/calendars/ScheduledEvent.java | 2 +- .../core/ml/datafeed/DatafeedConfig.java | 2 +- .../ml/datafeed/DelayedDataCheckConfig.java | 2 +- .../core/ml/job/config/AnalysisConfig.java | 2 +- .../xpack/core/ml/job/config/Job.java | 2 +- .../output/FlushAcknowledgement.java | 2 +- .../process/autodetect/state/DataCounts.java | 2 +- .../autodetect/state/ModelSizeStats.java | 2 +- .../autodetect/state/ModelSnapshot.java | 2 +- .../core/ml/job/results/AnomalyRecord.java | 2 +- .../xpack/core/ml/job/results/Bucket.java | 2 +- .../core/ml/job/results/BucketInfluencer.java | 2 +- .../xpack/core/ml/job/results/Forecast.java | 2 +- .../xpack/core/ml/job/results/Influencer.java | 2 +- .../xpack/core/ml/job/results/ModelPlot.java | 2 +- .../core/ml/notifications/AuditMessage.java | 2 +- .../xpack/core/ml/utils/time/TimeUtils.java | 129 ------------------ .../AbstractAuditMessageTests.java | 2 +- .../core/common}/time/TimeUtilsTests.java | 49 ++++++- .../autodetect/params/FlushJobParams.java | 2 +- .../process/autodetect/params/TimeRange.java | 2 +- 25 files changed, 71 insertions(+), 159 deletions(-) rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/{dataframe/utils => common/time}/TimeUtils.java (96%) delete mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimeUtils.java rename x-pack/plugin/{ml/src/test/java/org/elasticsearch/xpack/ml/utils => core/src/test/java/org/elasticsearch/xpack/core/common}/time/TimeUtilsTests.java (64%) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/utils/TimeUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/time/TimeUtils.java similarity index 96% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/utils/TimeUtils.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/time/TimeUtils.java index 21a4692f547..e345feb59b0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/utils/TimeUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/time/TimeUtils.java @@ -3,7 +3,7 @@ * or more contributor license agreements. 
Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.utils; +package org.elasticsearch.xpack.core.common.time; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; @@ -26,7 +26,7 @@ public final class TimeUtils { if (parser.currentToken() == XContentParser.Token.VALUE_NUMBER) { return new Date(parser.longValue()); } else if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { - return new Date(TimeUtils.dateStringToEpoch(parser.text())); + return new Date(dateStringToEpoch(parser.text())); } throw new IllegalArgumentException( "unexpected token [" + parser.currentToken() + "] for [" + fieldName + "]"); @@ -36,7 +36,7 @@ public final class TimeUtils { if (parser.currentToken() == XContentParser.Token.VALUE_NUMBER) { return Instant.ofEpochMilli(parser.longValue()); } else if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { - return Instant.ofEpochMilli(TimeUtils.dateStringToEpoch(parser.text())); + return Instant.ofEpochMilli(dateStringToEpoch(parser.text())); } throw new IllegalArgumentException( "unexpected token [" + parser.currentToken() + "] for [" + fieldName + "]"); @@ -123,8 +123,6 @@ public final class TimeUtils { } } - - /** * Check the given {@code timeValue} is a multiple of the {@code baseUnit} */ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessage.java index dd6aee25580..e0ebd8e97d9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessage.java @@ -11,8 +11,8 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.xpack.core.common.notifications.AbstractAuditMessage; import org.elasticsearch.xpack.core.common.notifications.Level; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.utils.TimeUtils; import java.util.Date; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java index 2762e0507ef..e3ad50d9b88 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java @@ -20,11 +20,11 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfig; import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; -import 
org.elasticsearch.xpack.core.dataframe.utils.TimeUtils; import java.io.IOException; import java.time.Instant; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/Annotation.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/Annotation.java index 185808de004..91c4053ed15 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/Annotation.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/Annotation.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.job.config.Job; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import java.io.IOException; import java.util.Date; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEvent.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEvent.java index afd10e0c17b..03bb25d3652 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEvent.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEvent.java @@ -23,7 +23,7 @@ import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.Intervals; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import java.io.IOException; import java.time.Instant; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java index 8c5e86b602c..062b6d82f16 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -34,7 +34,7 @@ import org.elasticsearch.xpack.core.ml.utils.MlStrings; import org.elasticsearch.xpack.core.ml.utils.QueryProvider; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import org.elasticsearch.xpack.core.ml.utils.XContentObjectTransformer; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import java.io.IOException; import java.util.ArrayList; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DelayedDataCheckConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DelayedDataCheckConfig.java index 9406b91d119..52de35af1fa 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DelayedDataCheckConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DelayedDataCheckConfig.java @@ -17,7 +17,7 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import 
org.elasticsearch.xpack.core.common.time.TimeUtils; import java.io.IOException; import java.util.Objects; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java index 933188c8221..27c3f8d1c68 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java @@ -18,7 +18,7 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import java.io.IOException; import java.util.ArrayList; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java index de37702fe52..a5c8c2ae421 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java @@ -26,7 +26,7 @@ import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFiel import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.MlStrings; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import java.io.IOException; import java.util.ArrayList; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java index ff47cfe1ca8..932e51a60f1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import java.io.IOException; import java.util.Date; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCounts.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCounts.java index 8d542ce25af..13e6459ca37 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCounts.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCounts.java @@ -14,7 +14,7 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import 
org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.job.config.Job; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import java.io.IOException; import java.util.Date; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStats.java index f02120433ef..2e78ab39fe6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStats.java @@ -16,7 +16,7 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.results.Result; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import java.io.IOException; import java.util.Date; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java index e1933ef1a59..dbf2643b880 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java @@ -22,7 +22,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.job.config.Job; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import java.io.IOException; import java.io.InputStream; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java index 3c099e30924..6bd9d147b82 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java @@ -17,7 +17,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import org.elasticsearch.Version; import java.io.IOException; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java index d335ba39e00..a92b417b6d4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java @@ -16,7 +16,7 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import 
org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import java.io.IOException; import java.util.ArrayList; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencer.java index e6031b3b8df..8cb06b6e0a2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencer.java @@ -15,7 +15,7 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import java.io.IOException; import java.util.Date; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Forecast.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Forecast.java index 03a9b801167..5f4e3c829c3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Forecast.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Forecast.java @@ -14,7 +14,7 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.job.config.Job; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import java.io.IOException; import java.util.Date; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influencer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influencer.java index d226058bf1d..d17b375459b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influencer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influencer.java @@ -15,7 +15,7 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import java.io.IOException; import java.util.Date; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ModelPlot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ModelPlot.java index c17ed54c788..80e5fc241e4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ModelPlot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ModelPlot.java @@ -15,7 +15,7 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import 
org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.job.config.Job; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import java.io.IOException; import java.util.Date; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditMessage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditMessage.java index c3328bb3263..6daa4223afd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditMessage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditMessage.java @@ -11,7 +11,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.xpack.core.common.notifications.AbstractAuditMessage; import org.elasticsearch.xpack.core.common.notifications.Level; import org.elasticsearch.xpack.core.ml.job.config.Job; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import java.util.Date; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimeUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimeUtils.java deleted file mode 100644 index ea0994dad71..00000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimeUtils.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.core.ml.utils.time; - -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.mapper.DateFieldMapper; - -import java.io.IOException; -import java.util.Date; -import java.util.concurrent.TimeUnit; - -public final class TimeUtils { - - private TimeUtils() { - // Do nothing - } - - public static Date parseTimeField(XContentParser parser, String fieldName) throws IOException { - if (parser.currentToken() == XContentParser.Token.VALUE_NUMBER) { - return new Date(parser.longValue()); - } else if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { - return new Date(TimeUtils.dateStringToEpoch(parser.text())); - } - throw new IllegalArgumentException( - "unexpected token [" + parser.currentToken() + "] for [" + fieldName + "]"); - } - - /** - * First tries to parse the date first as a Long and convert that to an - * epoch time. If the long number has more than 10 digits it is considered a - * time in milliseconds else if 10 or less digits it is in seconds. If that - * fails it tries to parse the string using - * {@link DateFieldMapper#DEFAULT_DATE_TIME_FORMATTER} - * - * If the date string cannot be parsed -1 is returned. - * - * @return The epoch time in milliseconds or -1 if the date cannot be - * parsed. 
- */ - public static long dateStringToEpoch(String date) { - try { - long epoch = Long.parseLong(date); - if (date.trim().length() <= 10) { // seconds - return epoch * 1000; - } else { - return epoch; - } - } catch (NumberFormatException nfe) { - // not a number - } - - try { - return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis(date); - } catch (ElasticsearchParseException | IllegalArgumentException e) { - } - // Could not do the conversion - return -1; - } - - /** - * Checks that the given {@code timeValue} is a non-negative multiple value of the {@code baseUnit}. - * - *
- * <ul>
- *   <li>400ms is valid for base unit of seconds</li>
- *   <li>450ms is invalid for base unit of seconds but valid for base unit of milliseconds</li>
- * </ul>
- */ - public static void checkNonNegativeMultiple(TimeValue timeValue, TimeUnit baseUnit, ParseField field) { - checkNonNegative(timeValue, field); - checkMultiple(timeValue, baseUnit, field); - } - - /** - * Checks that the given {@code timeValue} is a positive multiple value of the {@code baseUnit}. - * - *
- * <ul>
- *   <li>400ms is valid for base unit of seconds</li>
- *   <li>450ms is invalid for base unit of seconds but valid for base unit of milliseconds</li>
- * </ul>
- */ - public static void checkPositiveMultiple(TimeValue timeValue, TimeUnit baseUnit, ParseField field) { - checkPositive(timeValue, field); - checkMultiple(timeValue, baseUnit, field); - } - - /** - * Checks that the given {@code timeValue} is positive. - * - *
- * <ul>
- *   <li>1s is valid</li>
- *   <li>-1s is invalid</li>
- * </ul>
- */ - public static void checkPositive(TimeValue timeValue, ParseField field) { - long nanos = timeValue.getNanos(); - if (nanos <= 0) { - throw new IllegalArgumentException(field.getPreferredName() + " cannot be less or equal than 0. Value = " - + timeValue.toString()); - } - } - - private static void checkNonNegative(TimeValue timeValue, ParseField field) { - long nanos = timeValue.getNanos(); - if (nanos < 0) { - throw new IllegalArgumentException(field.getPreferredName() + " cannot be less than 0. Value = " + timeValue.toString()); - } - } - - - - /** - * Check the given {@code timeValue} is a multiple of the {@code baseUnit} - */ - public static void checkMultiple(TimeValue timeValue, TimeUnit baseUnit, ParseField field) { - long nanos = timeValue.getNanos(); - TimeValue base = new TimeValue(1, baseUnit); - long baseNanos = base.getNanos(); - if (nanos % baseNanos != 0) { - throw new IllegalArgumentException(field.getPreferredName() + " has to be a multiple of " + base.toString() + "; actual was '" - + timeValue.toString() + "'"); - } - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessageTests.java index 8fb42569837..e87e2cb0d93 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessageTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessageTests.java @@ -10,7 +10,7 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import org.junit.Before; import java.util.Date; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/time/TimeUtilsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/time/TimeUtilsTests.java similarity index 64% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/time/TimeUtilsTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/time/TimeUtilsTests.java index d33968a37cf..e122202b5fa 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/time/TimeUtilsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/time/TimeUtilsTests.java @@ -3,18 +3,61 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.ml.utils.time; +package org.elasticsearch.xpack.core.common.time; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import java.io.IOException; +import java.time.Instant; +import java.util.Date; import java.util.concurrent.TimeUnit; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + public class TimeUtilsTests extends ESTestCase { - public void testdateStringToEpoch() { + public void testParseTimeField() throws IOException { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, "123456789")) { + parser.nextToken(); + Date date = TimeUtils.parseTimeField(parser, "my_time_field"); + assertThat(date.getTime(), equalTo(123456789L)); + } + try (XContentParser parser = createParser(JsonXContent.jsonXContent, "\"2016-05-01T10:00:00.333-0030\"")) { + parser.nextToken(); + Date date = TimeUtils.parseTimeField(parser, "my_time_field"); + assertThat(date.getTime(), equalTo(1462098600333L)); + } + try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{}")) { + parser.nextToken(); + Exception e = expectThrows(IllegalArgumentException.class, () -> TimeUtils.parseTimeField(parser, "my_time_field")); + assertThat(e.getMessage(), containsString("unexpected token [START_OBJECT] for [my_time_field]")); + } + } + + public void testParseTimeFieldToInstant() throws IOException { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, "123456789")) { + parser.nextToken(); + Instant instant = TimeUtils.parseTimeFieldToInstant(parser, "my_time_field"); + assertThat(instant.toEpochMilli(), equalTo(123456789L)); + } + try (XContentParser parser = createParser(JsonXContent.jsonXContent, "\"2016-05-01T10:00:00.333-0030\"")) { + parser.nextToken(); + Instant instant = TimeUtils.parseTimeFieldToInstant(parser, "my_time_field"); + assertThat(instant.toEpochMilli(), equalTo(1462098600333L)); + } + try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{}")) { + parser.nextToken(); + Exception e = expectThrows(IllegalArgumentException.class, () -> TimeUtils.parseTimeFieldToInstant(parser, "my_time_field")); + assertThat(e.getMessage(), containsString("unexpected token [START_OBJECT] for [my_time_field]")); + } + } + + public void testDateStringToEpoch() { assertEquals(1462096800000L, TimeUtils.dateStringToEpoch("2016-05-01T10:00:00Z")); assertEquals(1462096800333L, TimeUtils.dateStringToEpoch("2016-05-01T10:00:00.333Z")); assertEquals(1462096800334L, TimeUtils.dateStringToEpoch("2016-05-01T10:00:00.334+00")); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/FlushJobParams.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/FlushJobParams.java index fd813e27fda..354b2d4c1b6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/FlushJobParams.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/FlushJobParams.java @@ -9,7 +9,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Strings; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import 
org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import java.util.Objects; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/TimeRange.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/TimeRange.java index a14d810d0d2..d99a8351c7b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/TimeRange.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/TimeRange.java @@ -9,7 +9,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import java.util.Objects; From 42cb59f7b4019d3199a346f36aaf0396ae94ef6c Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 27 Jun 2019 13:58:42 -0700 Subject: [PATCH 14/42] [DOCS] Updates ML APIs to use new API template (#43711) --- docs/reference/ml/apis/close-job.asciidoc | 19 +++--- .../ml/apis/delete-calendar-event.asciidoc | 17 +++--- .../ml/apis/delete-calendar-job.asciidoc | 25 ++++---- .../ml/apis/delete-calendar.asciidoc | 15 ++--- .../ml/apis/delete-datafeed.asciidoc | 26 ++++---- .../ml/apis/delete-expired-data.asciidoc | 15 +++-- docs/reference/ml/apis/delete-filter.asciidoc | 15 ++--- .../ml/apis/delete-forecast.asciidoc | 34 ++++++----- docs/reference/ml/apis/delete-job.asciidoc | 22 +++---- .../ml/apis/delete-snapshot.asciidoc | 21 +++---- .../ml/apis/find-file-structure.asciidoc | 42 ++++++------- docs/reference/ml/apis/flush-job.asciidoc | 27 +++++---- docs/reference/ml/apis/forecast.asciidoc | 21 +++---- docs/reference/ml/apis/get-bucket.asciidoc | 43 ++++++------- .../ml/apis/get-calendar-event.asciidoc | 24 ++++---- docs/reference/ml/apis/get-calendar.asciidoc | 20 +++---- docs/reference/ml/apis/get-category.asciidoc | 30 +++++----- .../ml/apis/get-datafeed-stats.asciidoc | 18 +++--- docs/reference/ml/apis/get-datafeed.asciidoc | 18 +++--- docs/reference/ml/apis/get-filter.asciidoc | 20 +++---- .../reference/ml/apis/get-influencer.asciidoc | 36 +++++------ docs/reference/ml/apis/get-job-stats.asciidoc | 18 +++--- docs/reference/ml/apis/get-job.asciidoc | 16 ++--- docs/reference/ml/apis/get-ml-info.asciidoc | 18 +++--- .../ml/apis/get-overall-buckets.asciidoc | 39 ++++++------ docs/reference/ml/apis/get-record.asciidoc | 36 +++++------ docs/reference/ml/apis/get-snapshot.asciidoc | 30 +++++----- docs/reference/ml/apis/open-job.asciidoc | 25 ++++---- .../ml/apis/post-calendar-event.asciidoc | 24 ++++---- docs/reference/ml/apis/post-data.asciidoc | 33 +++++----- .../ml/apis/preview-datafeed.asciidoc | 38 ++++++------ .../ml/apis/put-calendar-job.asciidoc | 26 ++++---- docs/reference/ml/apis/put-calendar.asciidoc | 18 +++--- docs/reference/ml/apis/put-datafeed.asciidoc | 60 +++++++++---------- docs/reference/ml/apis/put-filter.asciidoc | 20 +++---- docs/reference/ml/apis/put-job.asciidoc | 40 ++++++------- .../ml/apis/revert-snapshot.asciidoc | 24 ++++---- .../ml/apis/set-upgrade-mode.asciidoc | 18 +++--- .../reference/ml/apis/start-datafeed.asciidoc | 35 +++++------ docs/reference/ml/apis/stop-datafeed.asciidoc | 26 ++++---- 
.../ml/apis/update-datafeed.asciidoc | 51 ++++++++-------- docs/reference/ml/apis/update-filter.asciidoc | 43 ++++++------- docs/reference/ml/apis/update-job.asciidoc | 19 +++--- .../ml/apis/update-snapshot.asciidoc | 29 ++++----- .../ml/apis/validate-detector.asciidoc | 14 ++--- docs/reference/ml/apis/validate-job.asciidoc | 14 ++--- 46 files changed, 615 insertions(+), 607 deletions(-) diff --git a/docs/reference/ml/apis/close-job.asciidoc b/docs/reference/ml/apis/close-job.asciidoc index fa96b18777d..8eb78cff006 100644 --- a/docs/reference/ml/apis/close-job.asciidoc +++ b/docs/reference/ml/apis/close-job.asciidoc @@ -22,6 +22,13 @@ operations, but you can still explore and navigate results. `POST _ml/anomaly_detectors/_all/_close` + +[[ml-close-job-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-close-job-desc]] ==== {api-description-title} @@ -52,27 +59,21 @@ results the job might have recently produced or might produce in the future. [[ml-close-job-path-parms]] ==== {api-path-parms-title} -`job_id`:: +`` (Required):: (string) Identifier for the job. It can be a job identifier, a group name, or a wildcard expression. [[ml-close-job-query-parms]] ==== {api-query-parms-title} -`force`:: +`force` (Optional):: (boolean) Use to close a failed job, or to forcefully close a job which has not responded to its initial close request. -`timeout`:: +`timeout` (Optional):: (time units) Controls the time to wait until a job has closed. The default value is 30 minutes. -[[ml-close-job-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {stack-ov}/security-privileges.html[Security privileges]. - [[ml-close-job-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/delete-calendar-event.asciidoc b/docs/reference/ml/apis/delete-calendar-event.asciidoc index bc99398991b..0aa9ce5cc8d 100644 --- a/docs/reference/ml/apis/delete-calendar-event.asciidoc +++ b/docs/reference/ml/apis/delete-calendar-event.asciidoc @@ -13,6 +13,13 @@ Deletes scheduled events from a calendar. `DELETE _ml/calendars//events/` +[[ml-delete-calendar-event-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-delete-calendar-event-desc]] ==== {api-description-title} @@ -23,19 +30,13 @@ events and delete the calendar, see the [[ml-delete-calendar-event-path-parms]] ==== {api-path-parms-title} -`calendar_id`(required):: +`` (Required):: (string) Identifier for the calendar. -`event_id` (required):: +`` (Required):: (string) Identifier for the scheduled event. You can obtain this identifier by using the <>. -[[ml-delete-calendar-event-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {stack-ov}/security-privileges.html[Security privileges]. 
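As a minimal sketch of calling the delete calendar events endpoint from the low-level Java REST client (assuming a locally running cluster; the calendar and event identifiers are hypothetical placeholders, with real event IDs obtained from the get calendar events API as noted above):

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class DeleteCalendarEventSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build()) {
            // Hypothetical IDs: obtain real event IDs from the get calendar events API.
            Request request = new Request("DELETE",
                "/_ml/calendars/planned-outages/events/LS8LJGEBMTCMA-qz49st");
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());  // expect 200 OK on success
        }
    }
}
--------------------------------------------------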
- [[ml-delete-calendar-event-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/delete-calendar-job.asciidoc b/docs/reference/ml/apis/delete-calendar-job.asciidoc index 9451734c230..a555b3d3b92 100644 --- a/docs/reference/ml/apis/delete-calendar-job.asciidoc +++ b/docs/reference/ml/apis/delete-calendar-job.asciidoc @@ -13,21 +13,22 @@ Deletes jobs from a calendar. `DELETE _ml/calendars//jobs/` -[[ml-delete-calendar-job-path-parms]] -==== {api-path-parms-title} - -`calendar_id`(required):: - (string) Identifier for the calendar. - -`job_id` (required):: - (string) An identifier for the job. It can be a job identifier, a group name, or a - comma-separated list of jobs or groups. - [[ml-delete-calendar-job-prereqs]] ==== {api-prereq-title} -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {stack-ov}/security-privileges.html[Security privileges]. +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + +[[ml-delete-calendar-job-path-parms]] +==== {api-path-parms-title} + +`` (Required):: + (string) Identifier for the calendar. + +`` (Required):: + (string) An identifier for the job. It can be a job identifier, a group name, + or a comma-separated list of jobs or groups. [[ml-delete-calendar-job-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/delete-calendar.asciidoc b/docs/reference/ml/apis/delete-calendar.asciidoc index c07eb37c93d..065c117c49c 100644 --- a/docs/reference/ml/apis/delete-calendar.asciidoc +++ b/docs/reference/ml/apis/delete-calendar.asciidoc @@ -13,6 +13,13 @@ Deletes a calendar. `DELETE _ml/calendars/` +[[ml-delete-calendar-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-delete-calendar-desc]] ==== {api-description-title} @@ -22,15 +29,9 @@ calendar. [[ml-delete-calendar-path-parms]] ==== {api-path-parms-title} -`calendar_id` (required):: +`` (Required):: (string) Identifier for the calendar. -[[ml-delete-calendar-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {stack-ov}/security-privileges.html[Security privileges]. - [[ml-delete-calendar-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/delete-datafeed.asciidoc b/docs/reference/ml/apis/delete-datafeed.asciidoc index 9686959427d..23917bf9e33 100644 --- a/docs/reference/ml/apis/delete-datafeed.asciidoc +++ b/docs/reference/ml/apis/delete-datafeed.asciidoc @@ -15,29 +15,31 @@ Deletes an existing {dfeed}. `DELETE _ml/datafeeds/` +[[ml-delete-datafeed-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-delete-datafeed-desc]] ==== {api-description-title} -NOTE: Unless the `force` parameter is used, the {dfeed} must be stopped before it can be deleted. +NOTE: Unless you use the `force` parameter, you must stop the {dfeed} before you +can delete it. [[ml-delete-datafeed-path-parms]] ==== {api-path-parms-title} -`feed_id` (required):: - (string) Identifier for the {dfeed} +`` (Required):: + (string) Identifier for the {dfeed}. 
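A rough sketch of driving this endpoint from the low-level Java REST client, combining the datafeed identifier above with the `force` flag described under the query parameters below (the datafeed ID is a hypothetical placeholder):

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class DeleteDatafeedSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build()) {
            // "datafeed-total-requests" is a hypothetical datafeed ID.
            Request request = new Request("DELETE", "/_ml/datafeeds/datafeed-total-requests");
            request.addParameter("force", "true");  // delete without stopping the datafeed first
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}
--------------------------------------------------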
[[ml-delete-datafeed-query-parms]] ==== {api-query-parms-title} -`force`:: - (boolean) Use to forcefully delete a started {dfeed}; this method is quicker than - stopping and deleting the {dfeed}. - -[[ml-delete-datafeed-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {stack-ov}/security-privileges.html[Security privileges]. +`force` (Optional):: + (boolean) Use to forcefully delete a started {dfeed}; this method is quicker + than stopping and deleting the {dfeed}. [[ml-delete-datafeed-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/delete-expired-data.asciidoc b/docs/reference/ml/apis/delete-expired-data.asciidoc index 56ca1871329..ada9ec1c8c3 100644 --- a/docs/reference/ml/apis/delete-expired-data.asciidoc +++ b/docs/reference/ml/apis/delete-expired-data.asciidoc @@ -13,6 +13,13 @@ Deletes expired and unused machine learning data. `DELETE _ml/_delete_expired_data` +[[ml-delete-expired-data-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-delete-expired-data-desc]] ==== {api-description-title} @@ -20,14 +27,6 @@ Deletes all job results, model snapshots and forecast data that have exceeded their `retention days` period. Machine learning state documents that are not associated with any job are also deleted. -[[ml-delete-expired-data-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{stack-ov}/security-privileges.html[Security Privileges] and -{stack-ov}/built-in-roles.html[Built-in Roles]. - [[ml-delete-expired-data-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/delete-filter.asciidoc b/docs/reference/ml/apis/delete-filter.asciidoc index 8d6797448ec..1962db29ad7 100644 --- a/docs/reference/ml/apis/delete-filter.asciidoc +++ b/docs/reference/ml/apis/delete-filter.asciidoc @@ -13,6 +13,13 @@ Deletes a filter. `DELETE _ml/filters/` +[[ml-delete-filter-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-delete-filter-desc]] ==== {api-description-title} @@ -23,15 +30,9 @@ update or delete the job before you can delete the filter. [[ml-delete-filter-path-parms]] ==== {api-path-parms-title} -`filter_id` (required):: +`` (Required):: (string) Identifier for the filter. -[[ml-delete-filter-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. - [[ml-delete-filter-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/delete-forecast.asciidoc b/docs/reference/ml/apis/delete-forecast.asciidoc index 8332d07f840..aac054217fc 100644 --- a/docs/reference/ml/apis/delete-forecast.asciidoc +++ b/docs/reference/ml/apis/delete-forecast.asciidoc @@ -17,47 +17,51 @@ Deletes forecasts from a {ml} job. `DELETE _ml/anomaly_detectors//_forecast/_all` +[[ml-delete-forecast-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. 
See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-delete-forecast-desc]] ==== {api-description-title} By default, forecasts are retained for 14 days. You can specify a different -retention period with the `expires_in` parameter in the <>. The delete forecast API enables you to delete one or more forecasts before they expire. +retention period with the `expires_in` parameter in the +<>. The delete forecast API enables you to delete +one or more forecasts before they expire. -NOTE: When you delete a job its associated forecasts are deleted. +NOTE: When you delete a job, its associated forecasts are deleted. -For more information, see {stack-ov}/ml-overview.html#ml-forecasting[Forecasting the Future]. +For more information, see +{stack-ov}/ml-overview.html#ml-forecasting[Forecasting the future]. [[ml-delete-forecast-path-parms]] ==== {api-path-parms-title} -`job_id` (required):: +`` (Required):: (string) Identifier for the job. -`forecast_id`:: +`forecast_id` (Optional):: (string) A comma-separated list of forecast identifiers. If you do not specify this optional parameter or if you specify `_all`, the API deletes all forecasts from the job. -[[ml-delete-forecast-request-body]] -==== {api-request-body-title} +[[ml-delete-forecast-query-parms]] +==== {api-query-parms-title} -`allow_no_forecasts`:: +`allow_no_forecasts` (Optional):: (boolean) Specifies whether an error occurs when there are no forecasts. In particular, if this parameter is set to `false` and there are no forecasts associated with the job, attempts to delete all forecasts return an error. The default value is `true`. -`timeout`:: +`timeout` (Optional):: (time units) Specifies the period of time to wait for the completion of the delete operation. When this period of time elapses, the API fails and returns an error. The default value is `30s`. For more information about time units, see <>. - -[[ml-delete-forecast-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {stack-ov}/security-privileges.html[Security Privileges]. [[ml-delete-forecast-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/delete-job.asciidoc b/docs/reference/ml/apis/delete-job.asciidoc index 94042ba3565..efd172ef5fb 100644 --- a/docs/reference/ml/apis/delete-job.asciidoc +++ b/docs/reference/ml/apis/delete-job.asciidoc @@ -13,6 +13,13 @@ Deletes an existing anomaly detection job. `DELETE _ml/anomaly_detectors/` +[[ml-delete-job-prereqs]] +==== {api-prereq-title} + +* If {es} {security-features} are enabled, you must have `manage_ml` or `manage` +cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-delete-job-desc]] ==== {api-description-title} @@ -33,27 +40,20 @@ separated list. [[ml-delete-job-path-parms]] ==== {api-path-parms-title} -`job_id` (required):: - (string) Identifier for the job +`` (Required):: + (string) Identifier for the job. [[ml-delete-job-query-parms]] ==== {api-query-parms-title} -`force`:: +`force` (Optional):: (boolean) Use to forcefully delete an opened job; this method is quicker than closing and deleting the job. -`wait_for_completion`:: +`wait_for_completion` (Optional):: (boolean) Specifies whether the request should return immediately or wait until the job deletion completes. Defaults to `true`. 
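To make these two query parameters concrete, a short sketch using the low-level Java REST client (the job ID is a hypothetical placeholder, not one defined in this patch):

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class DeleteJobSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("DELETE", "/_ml/anomaly_detectors/total-requests");
            request.addParameter("force", "true");                // delete even if the job is open
            request.addParameter("wait_for_completion", "false"); // return without waiting for deletion
            Response response = client.performRequest(request);
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}
--------------------------------------------------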
-[[ml-delete-job-prereqs]] -==== {api-prereq-title} - -If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` -cluster privileges to use this API. -For more information, see {stack-ov}/security-privileges.html[Security Privileges]. - [[ml-delete-job-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/delete-snapshot.asciidoc b/docs/reference/ml/apis/delete-snapshot.asciidoc index 461f7fb4227..0e696f2a011 100644 --- a/docs/reference/ml/apis/delete-snapshot.asciidoc +++ b/docs/reference/ml/apis/delete-snapshot.asciidoc @@ -13,6 +13,13 @@ Deletes an existing model snapshot. `DELETE _ml/anomaly_detectors//model_snapshots/` +[[ml-delete-snapshot-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-delete-snapshot-desc]] ==== {api-description-title} @@ -23,17 +30,11 @@ the `model_snapshot_id` in the results from the get jobs API. [[ml-delete-snapshot-path-parms]] ==== {api-path-parms-title} -`job_id` (required):: - (string) Identifier for the job +`` (Required):: + (string) Identifier for the job. -`snapshot_id` (required):: - (string) Identifier for the model snapshot - -[[ml-delete-snapshot-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. +`` (Required):: + (string) Identifier for the model snapshot. [[ml-delete-snapshot-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/find-file-structure.asciidoc b/docs/reference/ml/apis/find-file-structure.asciidoc index ead3087f3d8..212e80c7e1b 100644 --- a/docs/reference/ml/apis/find-file-structure.asciidoc +++ b/docs/reference/ml/apis/find-file-structure.asciidoc @@ -16,6 +16,13 @@ suitable to be ingested into {es}. `POST _ml/find_file_structure` +[[ml-find-file-structure-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml` or +`monitor` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-find-file-structure-desc]] ==== {api-description-title} @@ -51,36 +58,36 @@ chosen. [[ml-find-file-structure-query-parms]] ==== {api-query-parms-title} -`charset`:: +`charset` (Optional):: (string) The file's character set. It must be a character set that is supported by the JVM that {es} uses. For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. If this parameter is not specified, the structure finder chooses an appropriate character set. -`column_names`:: +`column_names` (Optional):: (string) If you have set `format` to `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the file. If the file does not have a header role, columns are named "column1", "column2", "column3", etc. -`delimiter`:: +`delimiter` (Optional):: (string) If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. If this parameter is not specified, the structure finder considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). 
-`explain`:: +`explain` (Optional):: (boolean) If this parameter is set to `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. The default value is `false`. -`format`:: +`format` (Optional):: (string) The high level structure of the file. Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. If this parameter is not specified, the structure finder chooses one. -`grok_pattern`:: +`grok_pattern` (Optional):: (string) If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the file. The name of the timestamp field in the Grok pattern must match what is specified @@ -88,20 +95,20 @@ chosen. name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. -`has_header_row`:: +`has_header_row` (Optional):: (boolean) If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the file. If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the file to other rows. -`line_merge_size_limit`:: +`line_merge_size_limit` (Optional):: (unsigned integer) The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured files. The default is 10000. If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. -`lines_to_sample`:: +`lines_to_sample` (Optional):: (unsigned integer) The number of lines to include in the structural analysis, starting from the beginning of the file. The minimum is 2; the default is 1000. If the value of this parameter is greater than the number of lines in @@ -117,7 +124,7 @@ efficient to upload a sample file with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. -- -`quote`:: +`quote` (Optional):: (string) If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not @@ -125,18 +132,18 @@ to request analysis of 100000 lines to achieve some variety. format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. -`should_trim_fields`:: +`should_trim_fields` (Optional):: (boolean) If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. Otherwise, the default value is `false`. -`timeout`:: +`timeout` (Optional):: (time) Sets the maximum amount of time that the structure analysis make take. If the analysis is still running when the timeout expires then it will be aborted. The default value is 25 seconds. -`timestamp_field`:: +`timestamp_field` (Optional):: (string) The name of the field that contains the primary timestamp of each record in the file. In particular, if the file were ingested into an index, this is the field that would be used to populate the `@timestamp` field. + @@ -155,7 +162,7 @@ field (if any) is the primary timestamp field. 
For structured file formats, it is not compulsory to have a timestamp in the file. -- -`timestamp_format`:: +`timestamp_format` (Optional):: (string) The Java time format of the timestamp field in the file. + + -- @@ -207,13 +214,6 @@ be ingested into {es}. It does not need to be in JSON format and it does not need to be UTF-8 encoded. The size is limited to the {es} HTTP receive buffer size, which defaults to 100 Mb. -[[ml-find-file-structure-prereqs]] -==== {api-prereq-title} - -You must have `monitor_ml`, or `monitor` cluster privileges to use this API. -For more information, see {stack-ov}/security-privileges.html[Security Privileges]. - - [[ml-find-file-structure-examples]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/flush-job.asciidoc b/docs/reference/ml/apis/flush-job.asciidoc index 6598f8155b9..590f866ca17 100644 --- a/docs/reference/ml/apis/flush-job.asciidoc +++ b/docs/reference/ml/apis/flush-job.asciidoc @@ -13,6 +13,13 @@ Forces any buffered data to be processed by the job. `POST _ml/anomaly_detectors//_flush` +[[ml-flush-job-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-flush-job-desc]] ==== {api-description-title} @@ -29,39 +36,33 @@ opened again before analyzing further data. [[ml-flush-job-path-parms]] ==== {api-path-parms-title} -`job_id` (required):: -(string) Identifier for the job +`` (Required):: +(string) Identifier for the job. [[ml-flush-job-query-parms]] ==== {api-query-parms-title} -`advance_time`:: +`advance_time` (Optional):: (string) Specifies to advance to a particular time value. Results are generated and the model is updated for data from the specified time interval. -`calc_interim`:: +`calc_interim` (Optional):: (boolean) If true, calculates the interim results for the most recent bucket or all buckets within the latency period. -`end`:: +`end` (Optional):: (string) When used in conjunction with `calc_interim`, specifies the range of buckets on which to calculate interim results. -`skip_time`:: +`skip_time` (Optional):: (string) Specifies to skip to a particular time value. Results are not generated and the model is not updated for data from the specified time interval. -`start`:: +`start` (Optional):: (string) When used in conjunction with `calc_interim`, specifies the range of buckets on which to calculate interim results. -[[ml-flush-job-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. - [[ml-flush-job-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/forecast.asciidoc b/docs/reference/ml/apis/forecast.asciidoc index 05bd250975d..d137b2e1be3 100644 --- a/docs/reference/ml/apis/forecast.asciidoc +++ b/docs/reference/ml/apis/forecast.asciidoc @@ -13,10 +13,17 @@ Predicts the future behavior of a time series by using its historical behavior. `POST _ml/anomaly_detectors//_forecast` +[[ml-forecast-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-forecast-desc]] ==== {api-description-title} -See {stack-ov}/ml-overview.html#ml-forecasting[Forecasting the Future]. 
+See {stack-ov}/ml-overview.html#ml-forecasting[Forecasting the future]. [NOTE] =============================== @@ -29,30 +36,24 @@ forecast. For more information about this property, see <>. [[ml-forecast-path-parms]] ==== {api-path-parms-title} -`job_id`:: +`` (Required):: (string) Identifier for the job. [[ml-forecast-request-body]] ==== {api-request-body-title} -`duration`:: +`duration` (Optional):: (time units) A period of time that indicates how far into the future to forecast. For example, `30d` corresponds to 30 days. The default value is 1 day. The forecast starts at the last record that was processed. For more information about time units, see <>. -`expires_in`:: +`expires_in` (Optional):: (time units) The period of time that forecast results are retained. After a forecast expires, the results are deleted. The default value is 14 days. If set to a value of `0`, the forecast is never automatically deleted. For more information about time units, see <>. -[[ml-forecast-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. - [[ml-forecast-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/get-bucket.asciidoc b/docs/reference/ml/apis/get-bucket.asciidoc index 0e2b7988e8e..2a73d0f5d35 100644 --- a/docs/reference/ml/apis/get-bucket.asciidoc +++ b/docs/reference/ml/apis/get-bucket.asciidoc @@ -15,6 +15,17 @@ Retrieves job results for one or more buckets. `GET _ml/anomaly_detectors//results/buckets/` +[[ml-get-bucket-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also +need `read` index privilege on the index that stores the results. The +`machine_learning_admin` and `machine_learning_user` roles provide these +privileges. For more information, see +{stack-ov}/security-privileges.html[Security privileges] and +{stack-ov}/built-in-roles.html[Built-in roles]. + [[ml-get-bucket-desc]] ==== {api-description-title} @@ -24,44 +35,44 @@ bucket. [[ml-get-bucket-path-parms]] ==== {api-path-parms-title} -`job_id`:: +`` (Required):: (string) Identifier for the job -`timestamp`:: +`` (Optional):: (string) The timestamp of a single bucket result. - If you do not specify this optional parameter, the API returns information + If you do not specify this parameter, the API returns information about all buckets. [[ml-get-bucket-request-body]] ==== {api-request-body-title} -`anomaly_score`:: +`anomaly_score` (Optional):: (double) Returns buckets with anomaly scores greater or equal than this value. -`desc`:: +`desc` (Optional):: (boolean) If true, the buckets are sorted in descending order. -`end`:: +`end` (Optional):: (string) Returns buckets with timestamps earlier than this time. -`exclude_interim`:: +`exclude_interim` (Optional):: (boolean) If true, the output excludes interim results. By default, interim results are included. -`expand`:: +`expand` (Optional):: (boolean) If true, the output includes anomaly records. -`page`:: +`page` (Optional):: `from`::: (integer) Skips the specified number of buckets. `size`::: (integer) Specifies the maximum number of buckets to obtain. -`sort`:: +`sort` (Optional):: (string) Specifies the sort field for the requested buckets. By default, the buckets are sorted by the `timestamp` field. 
-`start`:: +`start` (Optional):: (string) Returns buckets with timestamps after this time. [[ml-get-bucket-results]] @@ -73,16 +84,6 @@ The API returns the following information: (array) An array of bucket objects. For more information, see <>. -[[ml-get-bucket-prereqs]] -==== {api-prereq-title} - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. You also need `read` index privilege on the index -that stores the results. The `machine_learning_admin` and `machine_learning_user` -roles provide these privileges. For more information, see -{stack-ov}/security-privileges.html[Security Privileges] and -{stack-ov}/built-in-roles.html[Built-in Roles]. - [[ml-get-bucket-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/get-calendar-event.asciidoc b/docs/reference/ml/apis/get-calendar-event.asciidoc index 1ee94eff7b5..173a2494886 100644 --- a/docs/reference/ml/apis/get-calendar-event.asciidoc +++ b/docs/reference/ml/apis/get-calendar-event.asciidoc @@ -16,6 +16,13 @@ calendars. `GET _ml/calendars/_all/events` +[[ml-get-calendar-event-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-get-calendar-event-desc]] ==== {api-description-title} @@ -25,22 +32,22 @@ calendars by using `_all`. [[ml-get-calendar-event-path-parms]] ==== {api-path-parms-title} -`calendar_id` (required):: +`` (Required):: (string) Identifier for the calendar. [[ml-get-calendar-event-request-body]] ==== {api-request-body-title} -`end`:: +`end` (Optional):: (string) Specifies to get events with timestamps earlier than this time. -`from`:: +`from` (Optional):: (integer) Skips the specified number of events. -`size`:: +`size` (Optional):: (integer) Specifies the maximum number of events to obtain. -`start`:: +`start` (Optional):: (string) Specifies to get events with timestamps after this time. [[ml-get-calendar-event-results]] @@ -52,13 +59,6 @@ The API returns the following information: (array) An array of scheduled event resources. For more information, see <>. -[[ml-get-calendar-event-prereqs]] -==== {api-prereq-title} - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security Privileges]. - [[ml-get-calendar-event-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/get-calendar.asciidoc b/docs/reference/ml/apis/get-calendar.asciidoc index 1ff9f8442c2..3d55f825bdb 100644 --- a/docs/reference/ml/apis/get-calendar.asciidoc +++ b/docs/reference/ml/apis/get-calendar.asciidoc @@ -15,6 +15,13 @@ Retrieves configuration information for calendars. `GET _ml/calendars/_all` +[[ml-get-calendar-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-get-calendar-desc]] ==== {api-description-title} @@ -24,17 +31,17 @@ You can get information for a single calendar or for all calendars by using [[ml-get-calendar-path-parms]] ==== {api-path-parms-title} -`calendar_id`:: +`` (Required):: (string) Identifier for the calendar. 
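For orientation, one way to issue this request from the low-level Java REST client, paging through all calendars with the `page` body object described below (the `page` values are illustrative, not defaults from this patch):

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class GetCalendarSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("GET", "/_ml/calendars/_all");
            // Illustrative paging values; see the request body parameters below.
            request.setJsonEntity("{ \"page\": { \"from\": 0, \"size\": 10 } }");
            Response response = client.performRequest(request);
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}
--------------------------------------------------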
[[ml-get-calendar-request-body]] ==== {api-request-body-title} -`page`:: +`page` (Optional):: `from`::: (integer) Skips the specified number of calendars. -`size`::: +`size` (Optional)::: (integer) Specifies the maximum number of calendars to obtain. [[ml-get-calendar-results]] @@ -46,13 +53,6 @@ The API returns the following information: (array) An array of calendar resources. For more information, see <>. -[[ml-get-calendar-prereqs]] -==== {api-prereq-title} - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security Privileges]. - [[ml-get-calendar-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/get-category.asciidoc b/docs/reference/ml/apis/get-category.asciidoc index 252f59c3ef2..6301eaf13a5 100644 --- a/docs/reference/ml/apis/get-category.asciidoc +++ b/docs/reference/ml/apis/get-category.asciidoc @@ -15,26 +15,36 @@ Retrieves job results for one or more categories. `GET _ml/anomaly_detectors//results/categories/` +[[ml-get-category-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also +need `read` index privilege on the index that stores the results. The +`machine_learning_admin` and `machine_learning_user` roles provide these +privileges. See {stack-ov}/security-privileges.html[Security privileges] and +{stack-ov}/built-in-roles.html[Built-in roles]. + [[ml-get-category-desc]] ==== {api-description-title} For more information about categories, see -{stack-ov}/ml-configuring-categories.html[Categorizing Log Messages]. +{stack-ov}/ml-configuring-categories.html[Categorizing log messages]. [[ml-get-category-path-parms]] ==== {api-path-parms-title} -`job_id`:: +`` (Required):: (string) Identifier for the job. -`category_id`:: - (long) Identifier for the category. If you do not specify this optional parameter, +`` (Optional):: + (long) Identifier for the category. If you do not specify this parameter, the API returns information about all categories in the job. [[ml-get-category-request-body]] ==== {api-request-body-title} -`page`:: +`page` (Optional):: `from`::: (integer) Skips the specified number of categories. `size`::: @@ -49,16 +59,6 @@ The API returns the following information: (array) An array of category objects. For more information, see <>. -[[ml-get-category-prereqs]] -==== {api-prereq-title} - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. You also need `read` index privilege on the index -that stores the results. The `machine_learning_admin` and `machine_learning_user` -roles provide these privileges. For more information, see -{stack-ov}/security-privileges.html[Security Privileges] and -{stack-ov}/built-in-roles.html[Built-in Roles]. - [[ml-get-category-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/get-datafeed-stats.asciidoc b/docs/reference/ml/apis/get-datafeed-stats.asciidoc index 1789478e081..6ce99785912 100644 --- a/docs/reference/ml/apis/get-datafeed-stats.asciidoc +++ b/docs/reference/ml/apis/get-datafeed-stats.asciidoc @@ -19,7 +19,14 @@ Retrieves usage information for {dfeeds}. 
`GET _ml/datafeeds/_stats` + -`GET _ml/datafeeds/_all/_stats` + +`GET _ml/datafeeds/_all/_stats` + +[[ml-get-datafeed-stats-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. [[ml-get-datafeed-stats-desc]] ==== {api-description-title} @@ -37,7 +44,7 @@ IMPORTANT: This API returns a maximum of 10,000 {dfeeds}. [[ml-get-datafeed-stats-path-parms]] ==== {api-path-parms-title} -`feed_id`:: +`` (Optional):: (string) Identifier for the {dfeed}. It can be a {dfeed} identifier or a wildcard expression. If you do not specify one of these options, the API returns statistics for all {dfeeds}. @@ -51,13 +58,6 @@ The API returns the following information: (array) An array of {dfeed} count objects. For more information, see <>. -[[ml-get-datafeed-stats-prereqs]] -==== {api-prereq-title} - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security Privileges]. - [[ml-get-datafeed-stats-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/get-datafeed.asciidoc b/docs/reference/ml/apis/get-datafeed.asciidoc index 8cb08818277..abc79ae5c7d 100644 --- a/docs/reference/ml/apis/get-datafeed.asciidoc +++ b/docs/reference/ml/apis/get-datafeed.asciidoc @@ -19,7 +19,14 @@ Retrieves configuration information for {dfeeds}. `GET _ml/datafeeds/` + -`GET _ml/datafeeds/_all` + +`GET _ml/datafeeds/_all` + +[[ml-get-datafeed-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. [[ml-get-datafeed-desc]] ==== {api-description-title} @@ -34,7 +41,7 @@ IMPORTANT: This API returns a maximum of 10,000 {dfeeds}. [[ml-get-datafeed-path-parms]] ==== {api-path-parms-title} -`feed_id`:: +`` (Optional):: (string) Identifier for the {dfeed}. It can be a {dfeed} identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all {dfeeds}. @@ -48,13 +55,6 @@ The API returns the following information: (array) An array of {dfeed} objects. For more information, see <>. -[[ml-get-datafeed-prereqs]] -==== {api-prereq-title} - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security Privileges]. - [[ml-get-datafeed-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/get-filter.asciidoc b/docs/reference/ml/apis/get-filter.asciidoc index c69b7174272..ad5fee343f6 100644 --- a/docs/reference/ml/apis/get-filter.asciidoc +++ b/docs/reference/ml/apis/get-filter.asciidoc @@ -15,6 +15,13 @@ Retrieves filters. `GET _ml/filters/` +[[ml-get-filter-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-get-filter-desc]] ==== {api-description-title} @@ -24,16 +31,16 @@ You can get a single filter or all filters. 
For more information, see [[ml-get-filter-path-parms]] ==== {api-path-parms-title} -`filter_id`:: +`` (Optional):: (string) Identifier for the filter. [[ml-get-filter-query-parms]] ==== {api-query-parms-title} -`from`::: +`from` (Optional)::: (integer) Skips the specified number of filters. -`size`::: +`size` (Optional)::: (integer) Specifies the maximum number of filters to obtain. [[ml-get-filter-results]] @@ -45,13 +52,6 @@ The API returns the following information: (array) An array of filter resources. For more information, see <>. -[[ml-get-filter-prereqs]] -==== {api-prereq-title} - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security Privileges]. - [[ml-get-filter-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/get-influencer.asciidoc b/docs/reference/ml/apis/get-influencer.asciidoc index fedcac20792..8d7ca889a26 100644 --- a/docs/reference/ml/apis/get-influencer.asciidoc +++ b/docs/reference/ml/apis/get-influencer.asciidoc @@ -13,39 +13,49 @@ Retrieves job results for one or more influencers. `GET _ml/anomaly_detectors//results/influencers` +[[ml-get-influencer-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also +need `read` index privilege on the index that stores the results. The +`machine_learning_admin` and `machine_learning_user` roles provide these +privileges. See {stack-ov}/security-privileges.html[Security privileges] and +{stack-ov}/built-in-roles.html[Built-in roles]. + [[ml-get-influencer-path-parms]] ==== {api-path-parms-title} -`job_id`:: +`` (Required):: (string) Identifier for the job. [[ml-get-influencer-request-body]] ==== {api-request-body-title} -`desc`:: +`desc` (Optional):: (boolean) If true, the results are sorted in descending order. -`end`:: +`end` (Optional):: (string) Returns influencers with timestamps earlier than this time. -`exclude_interim`:: +`exclude_interim` (Optional):: (boolean) If true, the output excludes interim results. By default, interim results are included. -`influencer_score`:: +`influencer_score` (Optional):: (double) Returns influencers with anomaly scores greater or equal than this value. -`page`:: +`page` (Optional):: `from`::: (integer) Skips the specified number of influencers. `size`::: (integer) Specifies the maximum number of influencers to obtain. -`sort`:: +`sort` (Optional):: (string) Specifies the sort field for the requested influencers. By default the influencers are sorted by the `influencer_score` value. -`start`:: +`start` (Optional):: (string) Returns influencers with timestamps after this time. [[ml-get-influencer-results]] @@ -57,16 +67,6 @@ The API returns the following information: (array) An array of influencer objects. For more information, see <>. -[[ml-get-influencer-prereqs]] -==== {api-prereq-title} - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. You also need `read` index privilege on the index -that stores the results. The `machine_learning_admin` and `machine_learning_user` -roles provide these privileges. For more information, see -{stack-ov}/security-privileges.html[Security Privileges] and -{stack-ov}/built-in-roles.html[Built-in Roles]. 
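A brief sketch of the same request from the low-level Java REST client, sorting by `influencer_score` in descending order as the body parameters above allow (the job ID is a hypothetical placeholder):

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class GetInfluencerSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build()) {
            // "it-ops-kpi" is a hypothetical job ID.
            Request request = new Request("GET",
                "/_ml/anomaly_detectors/it-ops-kpi/results/influencers");
            request.setJsonEntity("{ \"sort\": \"influencer_score\", \"desc\": true }");
            Response response = client.performRequest(request);
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}
--------------------------------------------------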
- [[ml-get-influencer-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/get-job-stats.asciidoc b/docs/reference/ml/apis/get-job-stats.asciidoc index 4b32b11abf8..8a705d7ff9e 100644 --- a/docs/reference/ml/apis/get-job-stats.asciidoc +++ b/docs/reference/ml/apis/get-job-stats.asciidoc @@ -17,7 +17,14 @@ Retrieves usage information for jobs. `GET _ml/anomaly_detectors/_stats` + -`GET _ml/anomaly_detectors/_all/_stats` + +`GET _ml/anomaly_detectors/_all/_stats` + +[[ml-get-job-stats-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. [[ml-get-job-stats-desc]] ==== {api-description-title} @@ -32,7 +39,7 @@ IMPORTANT: This API returns a maximum of 10,000 jobs. [[ml-get-job-stats-path-parms]] ==== {api-path-parms-title} -`job_id`:: +`` (Optional):: (string) An identifier for the job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these options, the API returns statistics for all jobs. @@ -46,13 +53,6 @@ The API returns the following information: (array) An array of job statistics objects. For more information, see <>. -[[ml-get-job-stats-prereqs]] -==== {api-prereq-title} - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security Privileges]. - [[ml-get-job-stats-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/get-job.asciidoc b/docs/reference/ml/apis/get-job.asciidoc index a4bbb66b5d0..176ca09fc56 100644 --- a/docs/reference/ml/apis/get-job.asciidoc +++ b/docs/reference/ml/apis/get-job.asciidoc @@ -19,6 +19,13 @@ Retrieves configuration information for jobs. `GET _ml/anomaly_detectors/_all` +[[ml-get-job-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-get-job-desc]] ==== {api-description-title} @@ -32,7 +39,7 @@ IMPORTANT: This API returns a maximum of 10,000 jobs. [[ml-get-job-path-parms]] ==== {api-path-parms-title} -`job_id`:: +` (Optional)`:: (string) Identifier for the job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these options, the API returns information for all jobs. @@ -46,13 +53,6 @@ The API returns the following information: (array) An array of job resources. For more information, see <>. -[[ml-get-job-prereqs]] -==== {api-prereq-title} - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security Privileges]. - [[ml-get-job-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/get-ml-info.asciidoc b/docs/reference/ml/apis/get-ml-info.asciidoc index b60a36eed29..2c486741ffd 100644 --- a/docs/reference/ml/apis/get-ml-info.asciidoc +++ b/docs/reference/ml/apis/get-ml-info.asciidoc @@ -15,6 +15,15 @@ Returns defaults and limits used by machine learning. `GET _ml/info` +[[get-ml-info-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. 
The +`machine_learning_admin` and `machine_learning_user` roles provide these +privileges. See {stack-ov}/security-privileges.html[Security privileges] and +{stack-ov}/built-in-roles.html[Built-in roles]. + [[get-ml-info-desc]] ==== {api-description-title} @@ -23,15 +32,6 @@ understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. -[[get-ml-info-prereqs]] -==== {api-prereq-title} - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. The `machine_learning_admin` and `machine_learning_user` -roles provide these privileges. For more information, see -{stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. - [[get-ml-info-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/get-overall-buckets.asciidoc b/docs/reference/ml/apis/get-overall-buckets.asciidoc index 81c5c371ac4..4d8287f9a54 100644 --- a/docs/reference/ml/apis/get-overall-buckets.asciidoc +++ b/docs/reference/ml/apis/get-overall-buckets.asciidoc @@ -18,6 +18,16 @@ bucket results of multiple jobs. `GET _ml/anomaly_detectors/_all/results/overall_buckets` +[[ml-get-overall-buckets-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also +need `read` index privilege on the index that stores the results. The +`machine_learning_admin` and `machine_learning_user` roles provide these +privileges. See {stack-ov}/security-privileges.html[Security privileges] and +{stack-ov}/built-in-roles.html[Built-in roles]. + [[ml-get-overall-buckets-desc]] ==== {api-description-title} @@ -46,37 +56,38 @@ overall buckets with a span equal to the largest job's `bucket_span`. [[ml-get-overall-buckets-path-parms]] ==== {api-path-parms-title} -`job_id`:: +`` (Required):: (string) Identifier for the job. It can be a job identifier, a group name, a comma-separated list of jobs or groups, or a wildcard expression. [[ml-get-overall-buckets-request-body]] ==== {api-request-body-title} -`allow_no_jobs`:: +`allow_no_jobs` (Optional):: (boolean) If `false` and the `job_id` does not match any job an error will be returned. The default value is `true`. -`bucket_span`:: +`bucket_span` (Optional):: (string) The span of the overall buckets. Must be greater or equal to the largest job's `bucket_span`. Defaults to the largest job's `bucket_span`. -`end`:: +`end` (Optional):: (string) Returns overall buckets with timestamps earlier than this time. -`exclude_interim`:: +`exclude_interim` (Optional):: (boolean) If `true`, the output excludes interim overall buckets. Overall buckets are interim if any of the job buckets within the overall bucket interval are interim. By default, interim results are included. -`overall_score`:: - (double) Returns overall buckets with overall scores greater or equal than this value. +`overall_score` (Optional):: + (double) Returns overall buckets with overall scores greater or equal than + this value. -`start`:: +`start` (Optional):: (string) Returns overall buckets with timestamps after this time. -`top_n`:: +`top_n` (Optional):: (integer) The number of top job bucket scores to be used in the `overall_score` calculation. The default value is `1`. @@ -89,16 +100,6 @@ The API returns the following information: (array) An array of overall bucket objects. 
For more information, see <>. -[[ml-get-overall-buckets-prereqs]] -==== {api-prereq-title} - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. You also need `read` index privilege on the index -that stores the results. The `machine_learning_admin` and `machine_learning_user` -roles provide these privileges. For more information, see -{stack-ov}/security-privileges.html[Security Privileges] and -{stack-ov}/built-in-roles.html[Built-in Roles]. - [[ml-get-overall-buckets-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/get-record.asciidoc b/docs/reference/ml/apis/get-record.asciidoc index fec36aa4a56..0acc3e0e49f 100644 --- a/docs/reference/ml/apis/get-record.asciidoc +++ b/docs/reference/ml/apis/get-record.asciidoc @@ -13,39 +13,49 @@ Retrieves anomaly records for a job. `GET _ml/anomaly_detectors//results/records` +[[ml-get-record-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also +need `read` index privilege on the index that stores the results. The +`machine_learning_admin` and `machine_learning_user` roles provide these +privileges. See {stack-ov}/security-privileges.html[Security privileges] and +{stack-ov}/built-in-roles.html[Built-in roles]. + [[ml-get-record-path-parms]] ==== {api-path-parms-title} -`job_id`:: +`job_id` (Required):: (string) Identifier for the job. [[ml-get-record-request-body]] ==== {api-request-body-title} -`desc`:: +`desc` (Optional):: (boolean) If true, the results are sorted in descending order. -`end`:: +`end` (Optional):: (string) Returns records with timestamps earlier than this time. -`exclude_interim`:: +`exclude_interim` (Optional):: (boolean) If true, the output excludes interim results. By default, interim results are included. -`page`:: +`page` (Optional):: `from`::: (integer) Skips the specified number of records. `size`::: (integer) Specifies the maximum number of records to obtain. -`record_score`:: +`record_score` (Optional):: (double) Returns records with anomaly scores greater or equal than this value. -`sort`:: +`sort` (Optional):: (string) Specifies the sort field for the requested records. By default, the records are sorted by the `anomaly_score` value. -`start`:: +`start` (Optional):: (string) Returns records with timestamps after this time. [[ml-get-record-results]] @@ -57,16 +67,6 @@ The API returns the following information: (array) An array of record objects. For more information, see <>. -[[ml-get-record-prereqs]] -==== {api-prereq-title} - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. You also need `read` index privilege on the index -that stores the results. The `machine_learning_admin` and `machine_learning_user` -roles provide these privileges. For more information, see -{stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. - [[ml-get-record-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/get-snapshot.asciidoc b/docs/reference/ml/apis/get-snapshot.asciidoc index eb5bc4354f2..ea1b15df33f 100644 --- a/docs/reference/ml/apis/get-snapshot.asciidoc +++ b/docs/reference/ml/apis/get-snapshot.asciidoc @@ -15,36 +15,43 @@ Retrieves information about model snapshots. 
`GET _ml/anomaly_detectors//model_snapshots/` +[[ml-get-snapshot-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-get-snapshot-path-parms]] ==== {api-path-parms-title} -`job_id`:: +`` (Required):: (string) Identifier for the job. -`snapshot_id`:: +`` (Optional):: (string) Identifier for the model snapshot. If you do not specify this optional parameter, the API returns information about all model snapshots. [[ml-get-snapshot-request-body]] ==== {api-request-body-title} -`desc`:: +`desc` (Optional):: (boolean) If true, the results are sorted in descending order. -`end`:: +`end` (Optional):: (date) Returns snapshots with timestamps earlier than this time. -`from`:: +`from` (Optional):: (integer) Skips the specified number of snapshots. -`size`:: +`size` (Optional):: (integer) Specifies the maximum number of snapshots to obtain. -`sort`:: +`sort` (Optional):: (string) Specifies the sort field for the requested snapshots. By default, the snapshots are sorted by their timestamp. -`start`:: +`start` (Optional):: (string) Returns snapshots with timestamps after this time. [[ml-get-snapshot-results]] @@ -56,13 +63,6 @@ The API returns the following information: (array) An array of model snapshot objects. For more information, see <>. -[[ml-get-snapshot-prereqs]] -==== {api-prereq-title} - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security privileges]. - [[ml-get-snapshot-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/open-job.asciidoc b/docs/reference/ml/apis/open-job.asciidoc index 4966ab9fc65..84000cb89b0 100644 --- a/docs/reference/ml/apis/open-job.asciidoc +++ b/docs/reference/ml/apis/open-job.asciidoc @@ -15,34 +15,35 @@ A job can be opened and closed multiple times throughout its lifecycle. `POST _ml/anomaly_detectors/{job_id}/_open` +[[ml-open-job-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-open-job-desc]] ==== {api-description-title} When you open a new job, it starts with an empty model. -When you open an existing job, the most recent model state is automatically loaded. -The job is ready to resume its analysis from where it left off, once new data is received. +When you open an existing job, the most recent model state is automatically +loaded. The job is ready to resume its analysis from where it left off, once new +data is received. [[ml-open-job-path-parms]] ==== {api-path-parms-title} -`job_id` (required):: -(string) Identifier for the job +`` (Required):: + (string) Identifier for the job [[ml-open-job-request-body]] ==== {api-request-body-title} -`timeout`:: +`timeout` (Optional):: (time) Controls the time to wait until a job has opened. The default value is 30 minutes. -[[ml-open-job-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{stack-ov}/security-privileges.html[Security privileges]. 
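As a minimal sketch of the request body described above (the job name `total-requests` is a placeholder), opening a job with an explicit timeout might look like:

[source,js]
--------------------------------------------------
POST _ml/anomaly_detectors/total-requests/_open
{
  "timeout": "35m"
}
--------------------------------------------------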
- [[ml-open-job-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/post-calendar-event.asciidoc b/docs/reference/ml/apis/post-calendar-event.asciidoc index 1a3614045ea..88d771f3b7f 100644 --- a/docs/reference/ml/apis/post-calendar-event.asciidoc +++ b/docs/reference/ml/apis/post-calendar-event.asciidoc @@ -13,6 +13,13 @@ Posts scheduled events in a calendar. `POST _ml/calendars/<calendar_id>/events` +[[ml-post-calendar-event-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-post-calendar-event-desc]] ==== {api-description-title} @@ -22,23 +29,16 @@ of which must have a start time, end time, and description. [[ml-post-calendar-event-path-parms]] ==== {api-path-parms-title} -`calendar_id` (required):: +`<calendar_id>` (Required):: (string) Identifier for the calendar. [[ml-post-calendar-event-request-body]] ==== {api-request-body-title} -`events`:: - (array) A list of one of more scheduled events. The event's start and end times - may be specified as integer milliseconds since the epoch or as a string in ISO 8601 - format. See <>. - -[[ml-post-calendar-event-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{stack-ov}/security-privileges.html[Security privileges]. +`events` (Required):: + (array) A list of one or more scheduled events. The event's start and end + times may be specified as integer milliseconds since the epoch or as a string + in ISO 8601 format. See <>. [[ml-post-calendar-event-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/post-data.asciidoc b/docs/reference/ml/apis/post-data.asciidoc index 39fb048d8b4..3c2d0e49fde 100644 --- a/docs/reference/ml/apis/post-data.asciidoc +++ b/docs/reference/ml/apis/post-data.asciidoc @@ -13,6 +13,13 @@ Sends data to an anomaly detection job for analysis. `POST _ml/anomaly_detectors/<job_id>/_data` +[[ml-post-data-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-post-data-desc]] ==== {api-description-title} @@ -45,17 +52,17 @@ or a comma-separated list. [[ml-post-data-path-parms]] ==== {api-path-parms-title} -`job_id` (required):: - (string) Identifier for the job +`<job_id>` (Required):: + (string) Identifier for the job. [[ml-post-data-query-parms]] ==== {api-query-parms-title} -`reset_start`:: - (string) Specifies the start of the bucket resetting range +`reset_start` (Optional):: + (string) Specifies the start of the bucket resetting range. -`reset_end`:: - (string) Specifies the end of the bucket resetting range +`reset_end` (Optional):: + (string) Specifies the end of the bucket resetting range. [[ml-post-data-request-body]] ==== {api-request-body-title} @@ -63,17 +70,11 @@ or a comma-separated list. A sequence of one or more JSON documents containing the data to be analyzed. Only whitespace characters are permitted in between the documents. -[[ml-post-data-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{stack-ov}/security-privileges.html[Security privileges].
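As a minimal sketch of the query parameters described above (the job name, field names, and epoch timestamps are placeholders), posting a single document while resetting a bucket range might look like:

[source,js]
--------------------------------------------------
POST _ml/anomaly_detectors/it_ops_new_kpi/_data?reset_start=1483228800000&reset_end=1483232400000
{"metric": "kpi", "value": 1.0, "timestamp": 1483228800000}
--------------------------------------------------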
- [[ml-post-data-example]] ==== {api-examples-title} -The following example posts data from the it_ops_new_kpi.json file to the `it_ops_new_kpi` job: +The following example posts data from the `it_ops_new_kpi.json` file to the +`it_ops_new_kpi` job: [source,js] -------------------------------------------------- @@ -82,8 +83,8 @@ $ curl -s -H "Content-type: application/json" --data-binary @it_ops_new_kpi.json -------------------------------------------------- -When the data is sent, you receive information about the operational progress of the job. -For example: +When the data is sent, you receive information about the operational progress of +the job. For example: [source,js] ---- diff --git a/docs/reference/ml/apis/preview-datafeed.asciidoc b/docs/reference/ml/apis/preview-datafeed.asciidoc index cfffe96b3de..4ca3ebcd10e 100644 --- a/docs/reference/ml/apis/preview-datafeed.asciidoc +++ b/docs/reference/ml/apis/preview-datafeed.asciidoc @@ -15,6 +15,13 @@ Previews a {dfeed}. `GET _ml/datafeeds//_preview` +[[ml-preview-datafeed-prereqs]] +==== {api-prereq-title} + +* If {es} {security-features} are enabled, you must have `monitor_ml`, `monitor`, +`manage_ml`, or `manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-preview-datafeed-desc]] ==== {api-description-title} @@ -22,30 +29,19 @@ The preview {dfeeds} API returns the first "page" of results from the `search` that is created by using the current {dfeed} settings. This preview shows the structure of the data that will be passed to the anomaly detection engine. +IMPORTANT: When {es} {security-features} are enabled, the {dfeed} query is +previewed using the credentials of the user calling the preview {dfeed} API. +When the {dfeed} is started it runs the query using the roles of the last user +to create or update it. If the two sets of roles differ then the preview may +not accurately reflect what the {dfeed} will return when started. To avoid +such problems, the same user that creates/updates the {dfeed} should preview +it to ensure it is returning the expected data. + [[ml-preview-datafeed-path-parms]] ==== {api-path-parms-title} -`datafeed_id` (required):: - (string) Identifier for the {dfeed} - -[[ml-preview-datafeed-prereqs]] -==== {api-prereq-title} - -If {es} {security-features} are enabled, you must have `monitor_ml`, `monitor`, -`manage_ml`, or `manage` cluster privileges to use this API. For more -information, see -{stack-ov}/security-privileges.html[Security privileges]. - -[[ml-preview-datafeed-security]] -==== Security Integration - -When {es} {security-features} are enabled, the {dfeed} query is previewed using -the credentials of the user calling the preview {dfeed} API. When the {dfeed} -is started it runs the query using the roles of the last user to -create or update it. If the two sets of roles differ then the preview may -not accurately reflect what the {dfeed} will return when started. To avoid -such problems, the same user that creates/updates the {dfeed} should preview -it to ensure it is returning the expected data. +`` (Required):: + (string) Identifier for the {dfeed}. [[ml-preview-datafeed-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/put-calendar-job.asciidoc b/docs/reference/ml/apis/put-calendar-job.asciidoc index abf124c8a11..0a1ee2fcc6d 100644 --- a/docs/reference/ml/apis/put-calendar-job.asciidoc +++ b/docs/reference/ml/apis/put-calendar-job.asciidoc @@ -13,22 +13,22 @@ Adds a job to a calendar. 
`PUT _ml/calendars//jobs/` -[[ml-put-calendar-job-path-parms]] -==== {api-path-parms-title} - -`calendar_id` (required):: - (string) Identifier for the calendar. - -`job_id` (required):: - (string) An identifier for the job. It can be a job identifier, a group name, or a - comma-separated list of jobs or groups. - [[ml-put-calendar-job-prereqs]] ==== {api-prereq-title} -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{stack-ov}/security-privileges.html[Security Privileges]. +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + +[[ml-put-calendar-job-path-parms]] +==== {api-path-parms-title} + +`` (Required):: + (string) Identifier for the calendar. + +`` (Required):: + (string) An identifier for the job. It can be a job identifier, a group name, + or a comma-separated list of jobs or groups. [[ml-put-calendar-job-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/put-calendar.asciidoc b/docs/reference/ml/apis/put-calendar.asciidoc index b7ea586a106..f98dd541d67 100644 --- a/docs/reference/ml/apis/put-calendar.asciidoc +++ b/docs/reference/ml/apis/put-calendar.asciidoc @@ -13,6 +13,13 @@ Instantiates a calendar. `PUT _ml/calendars/` +[[ml-put-calendar-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-put-calendar-desc]] ==== {api-description-title} @@ -22,22 +29,15 @@ For more information, see [[ml-put-calendar-path-parms]] ==== {api-path-parms-title} -`calendar_id` (required):: +`` (Required):: (string) Identifier for the calendar. [[ml-put-calendar-request-body]] ==== {api-request-body-title} -`description`:: +`description` (Optional):: (string) A description of the calendar. -[[ml-put-calendar-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{stack-ov}/security-privileges.html[Security privileges]. - [[ml-put-calendar-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/put-datafeed.asciidoc b/docs/reference/ml/apis/put-datafeed.asciidoc index 428af146b4d..6c4578abb16 100644 --- a/docs/reference/ml/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/apis/put-datafeed.asciidoc @@ -15,21 +15,34 @@ Instantiates a {dfeed}. `PUT _ml/datafeeds/` +[[ml-put-datafeed-prereqs]] +==== {api-prereq-title} + +* If {es} {security-features} are enabled, you must have `manage_ml` or `manage` +cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-put-datafeed-desc]] ==== {api-description-title} You must create a job before you create a {dfeed}. You can associate only one {dfeed} to each job. -IMPORTANT: You must use {kib} or this API to create a {dfeed}. Do not put a {dfeed} - directly to the `.ml-config` index using the Elasticsearch index API. - If {es} {security-features} are enabled, do not give users `write` - privileges on the `.ml-config` index. +[IMPORTANT] +==== +* You must use {kib} or this API to create a {dfeed}. Do not put a +{dfeed} directly to the `.ml-config` index using the {es} index API. If {es} +{security-features} are enabled, do not give users `write` privileges on the +`.ml-config` index. 
+* When {es} {security-features} are enabled, your {dfeed} remembers which roles +the user who created it had at the time of creation and runs the query using +those same roles. +==== [[ml-put-datafeed-path-parms]] ==== {api-path-parms-title} -`feed_id` (required):: +`` (Required):: (string) A numerical character string that uniquely identifies the {dfeed}. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. @@ -37,73 +50,58 @@ IMPORTANT: You must use {kib} or this API to create a {dfeed}. Do not put a {df [[ml-put-datafeed-request-body]] ==== {api-request-body-title} -`aggregations`:: +`aggregations` (Optional):: (object) If set, the {dfeed} performs aggregation searches. For more information, see <>. -`chunking_config`:: +`chunking_config` (Optional):: (object) Specifies how data searches are split into time chunks. See <>. -`delayed_data_check_config`:: +`delayed_data_check_config` (Optional):: (object) Specifies whether the data feed checks for missing data and the size of the window. See <>. -`frequency`:: +`frequency` (Optional):: (time units) The interval at which scheduled queries are made while the {dfeed} runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. For example: `150s`. -`indices` (required):: +`indices` (Required):: (array) An array of index names. Wildcards are supported. For example: `["it_ops_metrics", "server*"]`. -`job_id` (required):: +`job_id` (Required):: (string) A numerical character string that uniquely identifies the job. -`query`:: +`query` (Optional):: (object) The {es} query domain-specific language (DSL). This value corresponds to the query object in an {es} search POST body. All the options that are supported by {Es} can be used, as this object is passed verbatim to {es}. By default, this property has the following value: `{"match_all": {"boost": 1}}`. -`query_delay`:: +`query_delay` (Optional):: (time units) The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in {es} until 10:06 a.m., set this property to 120 seconds. The default value is `60s`. -`script_fields`:: +`script_fields` (Optional):: (object) Specifies scripts that evaluate custom expressions and returns script fields to the {dfeed}. The <> in a job can contain - functions that use these script fields. - For more information, + functions that use these script fields. For more information, see {ref}/search-request-script-fields.html[Script Fields]. -`scroll_size`:: +`scroll_size` (Optional):: (unsigned integer) The `size` parameter that is used in {es} searches. The default value is `1000`. For more information about these properties, see <>. -[[ml-put-datafeed-prereqs]] -==== {api-prereq-title} - -If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` -cluster privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security privileges]. - -[[ml-put-datafeed-security]] -==== Security integration - -When {es} {security-features} are enabled, your {dfeed} remembers which roles the -user who created it had at the time of creation and runs the query using those -same roles. 
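As a brief illustration of the required properties described above (the {dfeed}, job, and index names are placeholders), a minimal create request might look like:

[source,js]
--------------------------------------------------
PUT _ml/datafeeds/datafeed-total-requests
{
  "job_id": "total-requests",
  "indices": ["server-metrics"]
}
--------------------------------------------------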
- [[ml-put-datafeed-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/put-filter.asciidoc b/docs/reference/ml/apis/put-filter.asciidoc index 61ed24f4d5b..ad0d6d34ea8 100644 --- a/docs/reference/ml/apis/put-filter.asciidoc +++ b/docs/reference/ml/apis/put-filter.asciidoc @@ -13,6 +13,13 @@ Instantiates a filter. `PUT _ml/filters/` +[[ml-put-filter-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-put-filter-desc]] ==== {api-description-title} @@ -23,28 +30,21 @@ the `custom_rules` property of <` (Required):: (string) Identifier for the filter. [[ml-put-filter-request-body]] ==== {api-request-body-title} -`description`:: +`description` (Optional):: (string) A description of the filter. -`items`:: +`items` (Required):: (array of strings) The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. Up to 10000 items are allowed in each filter. -[[ml-put-filter-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{stack-ov}/security-privileges.html[Security privileges]. - [[ml-put-filter-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/put-job.asciidoc b/docs/reference/ml/apis/put-job.asciidoc index acf8d9db824..c60de488180 100644 --- a/docs/reference/ml/apis/put-job.asciidoc +++ b/docs/reference/ml/apis/put-job.asciidoc @@ -13,6 +13,13 @@ Instantiates a job. `PUT _ml/anomaly_detectors/` +[[ml-put-job-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-put-job-desc]] ==== {api-description-title} @@ -24,7 +31,7 @@ IMPORTANT: You must use {kib} or this API to create a {ml} job. Do not put a job [[ml-put-job-path-parms]] ==== {api-path-parms-title} -`job_id` (required):: +`` (Required):: (string) Identifier for the job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. @@ -32,61 +39,54 @@ IMPORTANT: You must use {kib} or this API to create a {ml} job. Do not put a job [[ml-put-job-request-body]] ==== {api-request-body-title} -`analysis_config`:: +`analysis_config` (Required):: (object) The analysis configuration, which specifies how to analyze the data. See <>. -`analysis_limits`:: +`analysis_limits` (Optional):: (object) Specifies runtime limits for the job. See <>. -`background_persist_interval`:: +`background_persist_interval` (Optional):: (time units) Advanced configuration option. The time between each periodic persistence of the model. See <>. -`custom_settings`:: +`custom_settings` (Optional):: (object) Advanced configuration option. Contains custom meta data about the job. See <>. -`data_description` (required):: +`data_description` (Required):: (object) Describes the format of the input data. This object is required, but it can be empty (`{}`). See <>. -`description`:: +`description` (Optional):: (string) A description of the job. -`groups`:: +`groups` (Optional):: (array of strings) A list of job groups. See <>. -`model_plot_config`:: +`model_plot_config` (Optional):: (object) Advanced configuration option. Specifies to store model information along with the results. 
This adds overhead to the performance of the system and is not feasible for jobs with many entities, see <>. -`model_snapshot_retention_days`:: +`model_snapshot_retention_days` (Optional):: (long) The time in days that model snapshots are retained for the job. Older snapshots are deleted. The default value is `1`, which means snapshots are retained for one day (twenty-four hours). -`renormalization_window_days`:: +`renormalization_window_days` (Optional):: (long) Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. See <>. -`results_index_name`:: +`results_index_name` (Optional):: (string) A text string that affects the name of the {ml} results index. The default value is `shared`, which generates an index named `.ml-anomalies-shared`. -`results_retention_days`:: +`results_retention_days` (Optional):: (long) Advanced configuration option. The number of days for which job results are retained. See <>. -[[ml-put-job-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{stack-ov}/security-privileges.html[Security privileges]. - [[ml-put-job-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/revert-snapshot.asciidoc b/docs/reference/ml/apis/revert-snapshot.asciidoc index f470b4ec60f..86d3d4c14a9 100644 --- a/docs/reference/ml/apis/revert-snapshot.asciidoc +++ b/docs/reference/ml/apis/revert-snapshot.asciidoc @@ -13,6 +13,13 @@ Reverts to a specific snapshot. `POST _ml/anomaly_detectors//model_snapshots//_revert` +[[ml-revert-snapshot-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-revert-snapshot-desc]] ==== {api-description-title} @@ -29,16 +36,16 @@ IMPORTANT: Before you revert to a saved snapshot, you must close the job. [[ml-revert-snapshot-path-parms]] ==== {api-path-parms-title} -`job_id` (required):: - (string) Identifier for the job +`` (Required):: + (string) Identifier for the job. -`snapshot_id` (required):: - (string) Identifier for the model snapshot +`` (Required):: + (string) Identifier for the model snapshot. [[ml-revert-snapshot-request-body]] ==== {api-request-body-title} -`delete_intervening_results`:: +`delete_intervening_results` (Optional):: (boolean) If true, deletes the results in the time period between the latest results and the time of the reverted snapshot. It also resets the model to accept records for this time period. The default value is false. @@ -47,13 +54,6 @@ NOTE: If you choose not to delete intervening results when reverting a snapshot, the job will not accept input data that is older than the current time. If you want to resend data, then delete the intervening results. -[[ml-revert-snapshot-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{stack-ov}/security-privileges.html[Security privileges]. 
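As a minimal sketch (the job and snapshot identifiers are placeholders), reverting to a snapshot while deleting the intervening results might look like:

[source,js]
--------------------------------------------------
POST _ml/anomaly_detectors/it_ops_new_kpi/model_snapshots/1491852978/_revert
{
  "delete_intervening_results": true
}
--------------------------------------------------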
- [[ml-revert-snapshot-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/set-upgrade-mode.asciidoc b/docs/reference/ml/apis/set-upgrade-mode.asciidoc index 16ddbe19e59..6a00656430c 100644 --- a/docs/reference/ml/apis/set-upgrade-mode.asciidoc +++ b/docs/reference/ml/apis/set-upgrade-mode.asciidoc @@ -26,6 +26,13 @@ POST /_ml/set_upgrade_mode?enabled=false&timeout=10m `POST _ml/set_upgrade_mode` +[[ml-set-upgrade-mode-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-set-upgrade-mode-desc]] ==== {api-description-title} @@ -54,20 +61,13 @@ IMPORTANT: No new {ml} jobs can be opened while the `upgrade_mode` setting is [[ml-set-upgrade-mode-query-parms]] ==== {api-query-parms-title} -`enabled`:: +`enabled` (Optional):: (boolean) When `true`, this enables `upgrade_mode`. Defaults to `false` -`timeout`:: +`timeout` (Optional):: (time) The time to wait for the request to be completed. The default value is 30 seconds. -[[ml-set-upgrade-mode-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{stack-ov}/security-privileges.html[Security privileges]. - [[ml-set-upgrade-mode-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/start-datafeed.asciidoc b/docs/reference/ml/apis/start-datafeed.asciidoc index 35c632d5c41..05cf0766e95 100644 --- a/docs/reference/ml/apis/start-datafeed.asciidoc +++ b/docs/reference/ml/apis/start-datafeed.asciidoc @@ -17,6 +17,13 @@ A {dfeed} can be started and stopped multiple times throughout its lifecycle. `POST _ml/datafeeds//_start` +[[ml-start-datafeed-prereqs]] +==== {api-prereq-title} + +* If {es} {security-features} are enabled, you must have `manage_ml` or `manage` +cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-start-datafeed-desc]] ==== {api-description-title} @@ -58,41 +65,31 @@ If you specify a `start` value that is earlier than the timestamp of the latest processed record, the {dfeed} continues from 1 millisecond after the timestamp of the latest processed record. +IMPORTANT: When {es} {security-features} are enabled, your {dfeed} remembers +which roles the last user to create or update it had at the time of +creation/update and runs the query using those same roles. + [[ml-start-datafeed-path-parms]] ==== {api-path-parms-title} -`feed_id` (required):: -(string) Identifier for the {dfeed} +`` (Required):: + (string) Identifier for the {dfeed}. [[ml-start-datafeed-request-body]] ==== {api-request-body-title} -`end`:: +`end` (Optional):: (string) The time that the {dfeed} should end. This value is exclusive. The default value is an empty string. -`start`:: +`start` (Optional):: (string) The time that the {dfeed} should begin. This value is inclusive. The default value is an empty string. -`timeout`:: +`timeout` (Optional):: (time) Controls the amount of time to wait until a {dfeed} starts. The default value is 20 seconds. -[[ml-start-datafeed-prereqs]] -==== {api-prereq-title} - -If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` -cluster privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security privileges]. 
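As a sketch of the request body described above (the {dfeed} identifier and timestamp are placeholders), starting a {dfeed} at a given time with an explicit timeout might look like:

[source,js]
--------------------------------------------------
POST _ml/datafeeds/datafeed-total-requests/_start
{
  "start": "2019-06-26T00:00:00Z",
  "timeout": "30s"
}
--------------------------------------------------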
- -[[ml-start-datafeed-security]] -==== Security integration - -When {es} {security-features} are enabled, your {dfeed} remembers which roles the -last user to create or update it had at the time of creation/update and runs the -query using those same roles. - [[ml-start-datafeed-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/stop-datafeed.asciidoc b/docs/reference/ml/apis/stop-datafeed.asciidoc index 497975f425c..bdac8d51fab 100644 --- a/docs/reference/ml/apis/stop-datafeed.asciidoc +++ b/docs/reference/ml/apis/stop-datafeed.asciidoc @@ -10,9 +10,6 @@ Stops one or more {dfeeds}. -A {dfeed} that is stopped ceases to retrieve data from {es}. -A {dfeed} can be started and stopped multiple times throughout its lifecycle. - [[ml-stop-datafeed-request]] ==== {api-request-title} @@ -22,9 +19,19 @@ A {dfeed} can be started and stopped multiple times throughout its lifecycle. `POST _ml/datafeeds/_all/_stop` +[[ml-stop-datafeed-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-stop-datafeed-desc]] ==== {api-description-title} +A {dfeed} that is stopped ceases to retrieve data from {es}. +A {dfeed} can be started and stopped multiple times throughout its lifecycle. + You can stop multiple {dfeeds} in a single API request by using a comma-separated list of {dfeeds} or a wildcard expression. You can close all {dfeeds} by using `_all` or by specifying `*` as the ``. @@ -32,27 +39,20 @@ comma-separated list of {dfeeds} or a wildcard expression. You can close all [[ml-stop-datafeed-path-parms]] ==== {api-path-parms-title} -`feed_id`:: +`` (Required):: (string) Identifier for the {dfeed}. It can be a {dfeed} identifier or a wildcard expression. [[ml-stop-datafeed-request-body]] ==== {api-request-body-title} -`force`:: +`force` (Optional):: (boolean) If true, the {dfeed} is stopped forcefully. -`timeout`:: +`timeout` (Optional):: (time) Controls the amount of time to wait until a {dfeed} stops. The default value is 20 seconds. -[[ml-stop-datafeed-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{stack-ov}/security-privileges.html[Security privileges]. - [[ml-stop-datafeed-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/update-datafeed.asciidoc b/docs/reference/ml/apis/update-datafeed.asciidoc index 9c3e56e66a6..b57088673d8 100644 --- a/docs/reference/ml/apis/update-datafeed.asciidoc +++ b/docs/reference/ml/apis/update-datafeed.asciidoc @@ -15,61 +15,72 @@ Updates certain properties of a {dfeed}. `POST _ml/datafeeds//_update` +[[ml-update-datafeed-prereqs]] +==== {api-prereq-title} + +* If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` +cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-update-datafeed-desc]] ==== {api-description-title} -NOTE: If you update the `delayed_data_check_config` property, you must stop and +If you update the `delayed_data_check_config` property, you must stop and start the {dfeed} for the change to be applied. +IMPORTANT: When {es} {security-features} are enabled, your {dfeed} remembers +which roles the user who updated it had at the time of update and runs the query +using those same roles. 
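For instance, a minimal update that narrows the {dfeed} query (the {dfeed} identifier and field name are placeholders) might look like the sketch below; per the note above, the updated query then runs with the roles of the user who issued this update:

[source,js]
--------------------------------------------------
POST _ml/datafeeds/datafeed-total-requests/_update
{
  "query": {
    "term": {
      "level": "error"
    }
  }
}
--------------------------------------------------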
+ [[ml-update-datafeed-path-parms]] ==== {api-path-parms-title} -`feed_id` (required):: - (string) Identifier for the {dfeed} +`` (Required):: + (string) Identifier for the {dfeed}. [[ml-update-datafeed-request-body]] ==== {api-request-body-title} The following properties can be updated after the {dfeed} is created: -`aggregations`:: +`aggregations` (Optional):: (object) If set, the {dfeed} performs aggregation searches. For more information, see <>. -`chunking_config`:: +`chunking_config` (Optional):: (object) Specifies how data searches are split into time chunks. See <>. -`delayed_data_check_config`:: +`delayed_data_check_config` (Optional):: (object) Specifies whether the data feed checks for missing data and the size of the window. See <>. -`frequency`:: +`frequency` (Optional):: (time units) The interval at which scheduled queries are made while the {dfeed} runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. For example: `150s`. -`indices`:: +`indices` (Optional):: (array) An array of index names. Wildcards are supported. For example: `["it_ops_metrics", "server*"]`. -`job_id`:: +`job_id` (Optional):: (string) A numerical character string that uniquely identifies the job. -`query`:: +`query` (Optional):: (object) The {es} query domain-specific language (DSL). This value corresponds to the query object in an {es} search POST body. All the options that are supported by {es} can be used, as this object is passed verbatim to {es}. By default, this property has the following value: `{"match_all": {"boost": 1}}`. -`query_delay`:: +`query_delay` (Optional):: (time units) The number of seconds behind real-time that data is queried. For example, if data from 10:04 a.m. might not be searchable in {es} until 10:06 a.m., set this property to 120 seconds. The default value is `60s`. -`script_fields`:: +`script_fields` (Optional):: (object) Specifies scripts that evaluate custom expressions and returns script fields to the {dfeed}. The <> in a job can contain @@ -77,27 +88,13 @@ The following properties can be updated after the {dfeed} is created: For more information, see {ref}/search-request-script-fields.html[Script Fields]. -`scroll_size`:: +`scroll_size` (Optional):: (unsigned integer) The `size` parameter that is used in {es} searches. The default value is `1000`. For more information about these properties, see <>. -[[ml-update-datafeed-prereqs]] -==== {api-prereq-title} - -If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` -cluster privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security privileges]. - -[[ml-update-datafeed-security]] -==== Security Integration - -When {es} {security-features} are enabled, your {dfeed} remembers which roles the -user who updated it had at the time of update and runs the query using those -same roles. - [[ml-update-datafeed-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/update-filter.asciidoc b/docs/reference/ml/apis/update-filter.asciidoc index 842808ebe55..df8f3056d12 100644 --- a/docs/reference/ml/apis/update-filter.asciidoc +++ b/docs/reference/ml/apis/update-filter.asciidoc @@ -13,35 +13,36 @@ Updates the description of a filter, adds items, or removes items. `POST _ml/filters//_update` -[[ml-update-filter-path-parms]] -==== {api-path-parms-title} - -`filter_id` (required):: - (string) Identifier for the filter. 
- -[[ml-update-filter-request-body]] -==== Request Body - -`description`:: - (string) A description for the filter. See <>. - -`add_items`:: - (array of strings) The items to add to the filter. - -`remove_items`:: - (array of strings) The items to remove from the filter. - [[ml-update-filter-prereqs]] ==== {api-prereq-title} -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See {stack-ov}/security-privileges.html[Security privileges]. +[[ml-update-filter-path-parms]] +==== {api-path-parms-title} + +`` (Required):: + (string) Identifier for the filter. + +[[ml-update-filter-request-body]] +==== {api-request-body-title} + +`description` (Optional):: + (string) A description for the filter. See <>. + +`add_items` (Optional):: + (array of strings) The items to add to the filter. + +`remove_items` (Optional):: + (array of strings) The items to remove from the filter. + [[ml-update-filter-example]] ==== {api-examples-title} -You can change the description, add and remove items to the `safe_domains` filter as follows: +You can change the description, add and remove items to the `safe_domains` +filter as follows: [source,js] -------------------------------------------------- diff --git a/docs/reference/ml/apis/update-job.asciidoc b/docs/reference/ml/apis/update-job.asciidoc index 39c510bda1e..e78bda613d8 100644 --- a/docs/reference/ml/apis/update-job.asciidoc +++ b/docs/reference/ml/apis/update-job.asciidoc @@ -13,11 +13,19 @@ Updates certain properties of a job. `POST _ml/anomaly_detectors//_update` +[[ml-update-job-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + + [[ml-update-job-path-parms]] ==== {api-path-parms-title} -`job_id` (required):: - (string) Identifier for the job +`` (Required):: + (string) Identifier for the job. [[ml-update-job-request-body]] ==== {api-request-body-title} @@ -88,13 +96,6 @@ A detector update object has the following properties: No other detector property can be updated. -[[ml-update-job-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{stack-ov}/security-privileges.html[Security privileges]. - [[ml-update-job-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/update-snapshot.asciidoc b/docs/reference/ml/apis/update-snapshot.asciidoc index edf9e05d867..1fe2ed5384b 100644 --- a/docs/reference/ml/apis/update-snapshot.asciidoc +++ b/docs/reference/ml/apis/update-snapshot.asciidoc @@ -13,37 +13,38 @@ Updates certain properties of a snapshot. `POST _ml/anomaly_detectors//model_snapshots//_update` +[[ml-update-snapshot-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + + [[ml-update-snapshot-path-parms]] ==== {api-path-parms-title} -`job_id` (required):: - (string) Identifier for the job +`` (Required):: + (string) Identifier for the job. -`snapshot_id` (required):: - (string) Identifier for the model snapshot +`` (Required):: + (string) Identifier for the model snapshot. 
[[ml-update-snapshot-request-body]] ==== {api-request-body-title} The following properties can be updated after the model snapshot is created: -`description`:: - (string) An optional description of the model snapshot. For example, +`description` (Optional):: + (string) A description of the model snapshot. For example, "Before black friday". -`retain`:: +`retain` (Optional):: (boolean) If true, this snapshot will not be deleted during automatic cleanup of snapshots older than `model_snapshot_retention_days`. Note that this snapshot will still be deleted when the job is deleted. The default value is false. -[[ml-update-snapshot-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{stack-ov}/security-privileges.html[Security privileges]. - [[ml-update-snapshot-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/validate-detector.asciidoc b/docs/reference/ml/apis/validate-detector.asciidoc index a3b7ca66072..2e5896b95cc 100644 --- a/docs/reference/ml/apis/validate-detector.asciidoc +++ b/docs/reference/ml/apis/validate-detector.asciidoc @@ -13,6 +13,13 @@ Validates detector configuration information. `POST _ml/anomaly_detectors/_validate/detector` +[[ml-valid-detector-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-valid-detector-desc]] ==== {api-description-title} @@ -25,13 +32,6 @@ before you create a job. For a list of the properties that you can specify in the body of this API, see <>. -[[ml-valid-detector-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{stack-ov}/security-privileges.html[Security privileges]. - [[ml-valid-detector-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/validate-job.asciidoc b/docs/reference/ml/apis/validate-job.asciidoc index 651e4571569..faa7cab2f39 100644 --- a/docs/reference/ml/apis/validate-job.asciidoc +++ b/docs/reference/ml/apis/validate-job.asciidoc @@ -13,6 +13,13 @@ Validates job configuration information. `POST _ml/anomaly_detectors/_validate` +[[ml-valid-job-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + [[ml-valid-job-desc]] ==== {api-description-title} @@ -25,13 +32,6 @@ create the job. For a list of the properties that you can specify in the body of this API, see <>. -[[ml-valid-job-prereqs]] -==== {api-prereq-title} - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{stack-ov}/security-privileges.html[Security privileges]. 
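As a minimal sketch (the field names are placeholders), validating a candidate job configuration without creating the job might look like:

[source,js]
--------------------------------------------------
POST _ml/anomaly_detectors/_validate
{
  "analysis_config": {
    "bucket_span": "300s",
    "detectors": [{"function": "count"}]
  },
  "data_description": {
    "time_field": "timestamp"
  }
}
--------------------------------------------------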
- [[ml-valid-job-example]] ==== {api-examples-title} From 1b7bcdc3a0329e03efb3a8fabdfdb20e717c0fac Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 27 Jun 2019 15:16:24 -0700 Subject: [PATCH 15/42] [DOCS] Adds data frame API response codes for allow_no_match (#43666) --- .../apis/delete-transform.asciidoc | 2 +- .../apis/get-transform-stats.asciidoc | 44 ++++++++++++++----- .../data-frames/apis/get-transform.asciidoc | 42 ++++++++++++++---- .../apis/preview-transform.asciidoc | 2 +- .../data-frames/apis/put-transform.asciidoc | 5 +-- .../data-frames/apis/start-transform.asciidoc | 2 +- .../data-frames/apis/stop-transform.asciidoc | 30 +++++++++++-- docs/reference/ml/apis/close-job.asciidoc | 2 - 8 files changed, 98 insertions(+), 31 deletions(-) diff --git a/docs/reference/data-frames/apis/delete-transform.asciidoc b/docs/reference/data-frames/apis/delete-transform.asciidoc index 23c70d914f0..d772bc3c15d 100644 --- a/docs/reference/data-frames/apis/delete-transform.asciidoc +++ b/docs/reference/data-frames/apis/delete-transform.asciidoc @@ -22,7 +22,7 @@ Deletes an existing {dataframe-transform}. [[delete-data-frame-transform-prereqs]] ==== {api-prereq-title} -If the {es} {security-features} are enabled, you must have +* If the {es} {security-features} are enabled, you must have `manage_data_frame_transforms` cluster privileges to use this API. The built-in `data_frame_transforms_admin` role has these privileges. For more information, see {stack-ov}/security-privileges.html[Security privileges] and diff --git a/docs/reference/data-frames/apis/get-transform-stats.asciidoc b/docs/reference/data-frames/apis/get-transform-stats.asciidoc index 5751c8a3ea7..889a109b8a3 100644 --- a/docs/reference/data-frames/apis/get-transform-stats.asciidoc +++ b/docs/reference/data-frames/apis/get-transform-stats.asciidoc @@ -31,15 +31,21 @@ Retrieves usage information for {dataframe-transforms}. [[get-data-frame-transform-stats-prereqs]] ==== {api-prereq-title} -If the {es} {security-features} are enabled, you must have +* If the {es} {security-features} are enabled, you must have `monitor_data_frame_transforms` cluster privileges to use this API. The built-in `data_frame_transforms_user` role has these privileges. For more information, see {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. -//[discrete] -//[[get-data-frame-transform-stats-desc]] -//===== {api-description-title} +[discrete] +[[get-data-frame-transform-stats-desc]] +==== {api-description-title} + +You can get statistics for multiple {dataframe-transforms} in a single API +request by using a comma-separated list of identifiers or a wildcard expression. +You can get statistics for all {dataframe-transforms} by using `_all`, by +specifying `*` as the ``, or by omitting the +``. [discrete] [[get-data-frame-transform-stats-path-parms]] @@ -56,17 +62,26 @@ see {stack-ov}/security-privileges.html[Security privileges] and ==== {api-query-parms-title} `allow_no_match` (Optional):: - (boolean) Whether to ignore if a wildcard expression matches no - {dataframe-transforms}. This includes `_all` string or when no transforms have - been specified. The default is `true`. + (boolean) Specifies what to do when the request: ++ +-- +* Contains wildcard expressions and there are no {dataframe-transforms} that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. 
+ +The default value is `true`, which returns an empty `transforms` array when +there are no matches and the subset of results when there are partial matches. +If this parameter is `false`, the request returns a `404` status code when there +are no matches or only partial matches. +-- `from` (Optional):: - (integer) Skips the specified number of {dataframe-transforms}. The - default value is `0`. + (integer) Skips the specified number of {dataframe-transforms}. The + default value is `0`. `size` (Optional):: - (integer) Specifies the maximum number of {dataframe-transforms} to obtain. - The default value is `100`. + (integer) Specifies the maximum number of {dataframe-transforms} to obtain. + The default value is `100`. [discrete] [[get-data-frame-transform-stats-response]] @@ -75,6 +90,13 @@ see {stack-ov}/security-privileges.html[Security privileges] and `transforms`:: (array) An array of statistics objects for {dataframe-transforms}, which are sorted by the `id` value in ascending order. + +[[get-data-frame-transform-stats-response-codes]] +==== {api-response-codes-title} + +`404` (Missing resources):: + If `allow_no_match` is `false`, this code indicates that there are no + resources that match the request or only partial matches for the request. [discrete] [[get-data-frame-transform-stats-example]] diff --git a/docs/reference/data-frames/apis/get-transform.asciidoc b/docs/reference/data-frames/apis/get-transform.asciidoc index 847d764c012..bf7901c191e 100644 --- a/docs/reference/data-frames/apis/get-transform.asciidoc +++ b/docs/reference/data-frames/apis/get-transform.asciidoc @@ -30,12 +30,22 @@ Retrieves configuration information for {dataframe-transforms}. [[get-data-frame-transform-prereqs]] ==== {api-prereq-title} -If the {es} {security-features} are enabled, you must have +* If the {es} {security-features} are enabled, you must have `monitor_data_frame_transforms` cluster privileges to use this API. The built-in `data_frame_transforms_user` role has these privileges. For more information, see {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. +[discrete] +[[get-data-frame-transform-desc]] +==== {api-description-title} + +You can get information for multiple {dataframe-transforms} in a single API +request by using a comma-separated list of identifiers or a wildcard expression. +You can get information for all {dataframe-transforms} by using `_all`, by +specifying `*` as the ``, or by omitting the +``. + [discrete] [[get-data-frame-transform-path-parms]] ==== {api-path-parms-title} @@ -51,17 +61,26 @@ see {stack-ov}/security-privileges.html[Security privileges] and ==== {api-query-parms-title} `allow_no_match` (Optional):: - (boolean) Whether to ignore if a wildcard expression matches no - {dataframe-transforms}. This includes `_all` string or when no transforms have - been specified. The default is `true`. +(boolean) Specifies what to do when the request: ++ +-- +* Contains wildcard expressions and there are no {dataframe-transforms} that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `transforms` array when +there are no matches and the subset of results when there are partial matches. +If this parameter is `false`, the request returns a `404` status code when there +are no matches or only partial matches. 
+-- `from` (Optional):: - (integer) Skips the specified number of {dataframe-transforms}. The - default value is `0`. + (integer) Skips the specified number of {dataframe-transforms}. The + default value is `0`. `size` (Optional):: - (integer) Specifies the maximum number of {dataframe-transforms} to obtain. - The default value is `100`. + (integer) Specifies the maximum number of {dataframe-transforms} to obtain. + The default value is `100`. [discrete] [[get-data-frame-transform-response]] @@ -70,6 +89,13 @@ see {stack-ov}/security-privileges.html[Security privileges] and `transforms`:: (array) An array of transform resources, which are sorted by the `id` value in ascending order. + +[[get-data-frame-transform-response-codes]] +==== {api-response-codes-title} + +`404` (Missing resources):: + If `allow_no_match` is `false`, this code indicates that there are no + resources that match the request or only partial matches for the request. [discrete] [[get-data-frame-transform-example]] diff --git a/docs/reference/data-frames/apis/preview-transform.asciidoc b/docs/reference/data-frames/apis/preview-transform.asciidoc index 4e11fd5eda2..5dfe1f2f1d7 100644 --- a/docs/reference/data-frames/apis/preview-transform.asciidoc +++ b/docs/reference/data-frames/apis/preview-transform.asciidoc @@ -22,7 +22,7 @@ Previews a {dataframe-transform}. [[preview-data-frame-transform-prereq]] ==== {api-prereq-title} -If the {es} {security-features} are enabled, you must have +* If the {es} {security-features} are enabled, you must have `manage_data_frame_transforms` cluster privileges to use this API. The built-in `data_frame_transforms_admin` role has these privileges. You must also have `read` and `view_index_metadata` privileges on the source index for the diff --git a/docs/reference/data-frames/apis/put-transform.asciidoc b/docs/reference/data-frames/apis/put-transform.asciidoc index 3c6a5251bff..6910cb85a25 100644 --- a/docs/reference/data-frames/apis/put-transform.asciidoc +++ b/docs/reference/data-frames/apis/put-transform.asciidoc @@ -22,7 +22,7 @@ Instantiates a {dataframe-transform}. [[put-data-frame-transform-prereqs]] ==== {api-prereq-title} -If the {es} {security-features} are enabled, you must have +* If the {es} {security-features} are enabled, you must have `manage_data_frame_transforms` cluster privileges to use this API. The built-in `data_frame_transforms_admin` role has these privileges. You must also have `read` and `view_index_metadata` privileges on the source index and `read`, @@ -30,10 +30,9 @@ have `read` and `view_index_metadata` privileges on the source index and `read`, information, see {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. - [discrete] [[put-data-frame-transform-desc]] -===== {api-description-title} +==== {api-description-title} IMPORTANT: You must use {kib} or this API to create a {dataframe-transform}. Do not put a {dataframe-transform} directly into any diff --git a/docs/reference/data-frames/apis/start-transform.asciidoc b/docs/reference/data-frames/apis/start-transform.asciidoc index e7ae0353f5c..b76bcb0dd47 100644 --- a/docs/reference/data-frames/apis/start-transform.asciidoc +++ b/docs/reference/data-frames/apis/start-transform.asciidoc @@ -22,7 +22,7 @@ Starts one or more {dataframe-transforms}. 
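As a minimal sketch (the transform identifier is a placeholder), starting a {dataframe-transform} might look like:

[source,js]
--------------------------------------------------
POST _data_frame/transforms/ecommerce_transform/_start
--------------------------------------------------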
[[start-data-frame-transform-prereqs]] ==== {api-prereq-title} -If the {es} {security-features} are enabled, you must have +* If the {es} {security-features} are enabled, you must have `manage_data_frame_transforms` cluster privileges to use this API. You must also have `view_index_metadata` privileges on the source index for the {dataframe-transform}. For more information, see diff --git a/docs/reference/data-frames/apis/stop-transform.asciidoc b/docs/reference/data-frames/apis/stop-transform.asciidoc index 9a08aaf0a9b..80c2654babe 100644 --- a/docs/reference/data-frames/apis/stop-transform.asciidoc +++ b/docs/reference/data-frames/apis/stop-transform.asciidoc @@ -26,7 +26,7 @@ Stops one or more {dataframe-transforms}. [[stop-data-frame-transform-prereq]] ==== {api-prereq-title} -If the {es} {security-features} are enabled, you must have +* If the {es} {security-features} are enabled, you must have `manage_data_frame_transforms` cluster privileges to use this API. The built-in `data_frame_transforms_admin` role has these privileges. For more information, see {stack-ov}/security-privileges.html[Security privileges] and @@ -55,9 +55,23 @@ All {dataframe-transforms} can be stopped by using `_all` or `*` as the ==== {api-query-parms-title} `allow_no_match` (Optional):: - (boolean) Whether to ignore if a wildcard expression matches no - {dataframe-transforms}. This includes `_all` string or when no transforms have - been specified. The default is `true`. +(boolean) Specifies what to do when the request: ++ +-- +* Contains wildcard expressions and there are no {dataframe-transforms} that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns a successful acknowledgement message +when there are no matches. When there are only partial matches, the API stops +the appropriate {dataframe-transforms}. For example, if the request contains +`test-id1*,test-id2*` as the identifiers and there are no {dataframe-transforms} +that match `test-id2*`, the API nonetheless stops the {dataframe-transforms} +that match `test-id1*`. + +If this parameter is `false`, the request returns a `404` status code when there +are no matches or only partial matches. +-- `timeout` (Optional):: (time value) If `wait_for_completion=true`, the API blocks for (at maximum) @@ -72,6 +86,14 @@ All {dataframe-transforms} can be stopped by using `_all` or `*` as the completely stops. If set to `false`, the API returns immediately and the indexer will be stopped asynchronously in the background. Defaults to `false`. +[discrete] +[[stop-data-frame-transform-response-codes]] +==== {api-response-codes-title} + +`404` (Missing resources):: + If `allow_no_match` is `false`, this code indicates that there are no + resources that match the request or only partial matches for the request. + [discrete] [[stop-data-frame-transform-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/apis/close-job.asciidoc b/docs/reference/ml/apis/close-job.asciidoc index 8eb78cff006..2a38648e48f 100644 --- a/docs/reference/ml/apis/close-job.asciidoc +++ b/docs/reference/ml/apis/close-job.asciidoc @@ -12,7 +12,6 @@ A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. -[discrete] [[ml-close-job-request]] ==== {api-request-title} @@ -55,7 +54,6 @@ after the close job API returns. 
The `force` query parameter should only be used in situations where the job has
already failed, or where you are not interested in results the job might have
recently produced or might produce in the future.

-[discrete]
[[ml-close-job-path-parms]]
==== {api-path-parms-title}

From ce8771feb75c69601ab5710d6f5c4869ddef140c Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Thu, 27 Jun 2019 18:27:16 -0400
Subject: [PATCH 16/42] Do not use MockInternalEngine in GatewayIndexStateIT (#43716)

GatewayIndexStateIT#testRecoverBrokenIndexMetadata relies on flushing on
shutdown. This behaviour, however, can be randomly disabled in
MockInternalEngine.

Closes #43034
---
 .../main/java/org/elasticsearch/index/IndexService.java  | 2 --
 .../main/java/org/elasticsearch/index/engine/Engine.java | 2 --
 .../org/elasticsearch/gateway/GatewayIndexStateIT.java   | 9 ++++++---
 .../memory/breaker/RandomExceptionCircuitBreakerIT.java  | 8 ++------
 .../search/basic/SearchWithRandomExceptionsIT.java       | 8 ++------
 .../java/org/elasticsearch/test/ESIntegTestCase.java     | 9 ++++++++-
 .../elasticsearch/snapshots/SourceOnlySnapshotIT.java    | 7 ++-----
 7 files changed, 20 insertions(+), 25 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java
index a99fcda6b69..6c51a7a6f5c 100644
--- a/server/src/main/java/org/elasticsearch/index/IndexService.java
+++ b/server/src/main/java/org/elasticsearch/index/IndexService.java
@@ -472,8 +472,6 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
         try {
             // only flush we are we closed (closed index or shutdown) and if we are not deleted
             final boolean flushEngine = deleted.get() == false && closed.get();
-            logger.trace("[{}] closing shard (flushEngine: {}, deleted: {}, closed: {})", shardId, flushEngine, deleted.get(),
-                closed.get());
             indexShard.close(reason, flushEngine);
         } catch (Exception e) {
             logger.debug(() -> new ParameterizedMessage("[{}] failed to close index shard", shardId), e);
diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java
index a50d0c790d4..e21b816aefd 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java
@@ -1716,8 +1716,6 @@ public abstract class Engine implements Closeable {
                     close(); // double close is not a problem
                 }
             }
-        } else {
-            logger.trace("skipping flushAndClose as already closed");
         }
         awaitPendingClose();
     }
diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java
index 59e5091faef..6de6cb58ff7 100644
--- a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java
+++ b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java
@@ -52,7 +52,6 @@ import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
 import org.elasticsearch.test.ESIntegTestCase.Scope;
 import org.elasticsearch.test.InternalTestCluster.RestartCallback;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 
 import java.io.IOException;
 import java.util.List;
@@ -76,6 +75,12 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
 
     private final Logger logger = LogManager.getLogger(GatewayIndexStateIT.class);
 
+    @Override
+    protected boolean addMockInternalEngine() {
+        // testRecoverBrokenIndexMetadata relies on the flushing on shutdown behavior which can be randomly disabled in MockInternalEngine.
+        return false;
+    }
+
     public void testMappingMetaDataParsed() throws Exception {
         logger.info("--> starting 1 nodes");
         internalCluster().startNode();
@@ -346,8 +351,6 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
      * allocated in our metadata that we recover. In that case we now have the ability to check the index on local recovery from disk
      * if it is sane and if we can successfully create an IndexService. This also includes plugins etc.
      */
-    // temporarily enabling TRACE to aid debugging https://github.com/elastic/elasticsearch/issues/43034
-    @TestLogging("_root:TRACE")
     public void testRecoverBrokenIndexMetadata() throws Exception {
         logger.info("--> starting one node");
         internalCluster().startNode();
diff --git a/server/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java b/server/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java
index f379b7ee522..b86b622705e 100644
--- a/server/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java
+++ b/server/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java
@@ -51,10 +51,8 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Random;
-import java.util.Set;
 import java.util.concurrent.ExecutionException;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
@@ -70,10 +68,8 @@ public class RandomExceptionCircuitBreakerIT extends ESIntegTestCase {
     }
 
     @Override
-    protected Collection<Class<? extends Plugin>> getMockPlugins() {
-        Set<Class<? extends Plugin>> mocks = new HashSet<>(super.getMockPlugins());
-        mocks.remove(MockEngineFactoryPlugin.class);
-        return mocks;
+    protected boolean addMockInternalEngine() {
+        return false;
     }
 
     public void testBreakerWithRandomExceptions() throws IOException, InterruptedException, ExecutionException {
diff --git a/server/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java b/server/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java
index 891e64f5237..3d196d7a0d9 100644
--- a/server/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java
+++ b/server/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java
@@ -49,10 +49,8 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Random;
-import java.util.Set;
 import java.util.concurrent.ExecutionException;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
@@ -65,10 +63,8 @@ public class SearchWithRandomExceptionsIT extends ESIntegTestCase {
     }
 
     @Override
-    protected Collection<Class<? extends Plugin>> getMockPlugins() {
-        Set<Class<? extends Plugin>> mocks = new HashSet<>(super.getMockPlugins());
-        mocks.remove(MockEngineFactoryPlugin.class);
-        return mocks;
+    protected boolean addMockInternalEngine() {
+        return false;
     }
 
     public void testRandomExceptions() throws IOException, InterruptedException, ExecutionException {
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
index 21e7dc3f683..675c5c62c4e 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -1984,6 +1984,13 @@ public abstract class ESIntegTestCase extends ESTestCase {
         return true;
     }
 
+    /**
+     * Returns {@code true} if this test cluster can use a mock internal engine. Defaults to true.
+     */
+    protected boolean addMockInternalEngine() {
+        return true;
+    }
+
     /**
      * Returns a function that allows to wrap / filter all clients that are exposed by the test cluster. This is useful
      * for debugging or request / response pre and post processing. It also allows to intercept all calls done by the test
@@ -2006,7 +2013,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
         if (randomBoolean()) {
             mocks.add(NodeMocksPlugin.class);
         }
-        if (randomBoolean()) {
+        if (addMockInternalEngine() && randomBoolean()) {
             mocks.add(MockEngineFactoryPlugin.class);
         }
         if (randomBoolean()) {
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java
index 81be978d331..a54c57aceb3 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java
@@ -24,7 +24,6 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.MockEngineFactoryPlugin;
 import org.elasticsearch.index.engine.EngineFactory;
 import org.elasticsearch.index.mapper.SeqNoFieldMapper;
 import org.elasticsearch.index.query.QueryBuilders;
@@ -65,10 +64,8 @@ public class SourceOnlySnapshotIT extends ESIntegTestCase {
     }
 
     @Override
-    protected Collection<Class<? extends Plugin>> getMockPlugins() {
-        Collection<Class<? extends Plugin>> classes = new ArrayList<>(super.getMockPlugins());
-        classes.remove(MockEngineFactoryPlugin.class);
-        return classes;
+    protected boolean addMockInternalEngine() {
+        return false;
     }
 
     public static final class MyPlugin extends Plugin implements RepositoryPlugin, EnginePlugin {

From 3607876a7188eb3a9b972a4c5e5026aee99caed4 Mon Sep 17 00:00:00 2001
From: Igor Motov
Date: Thu, 27 Jun 2019 13:34:33 -0400
Subject: [PATCH 17/42] Geo: Makes coordinate validator in libs/geo pluggable (#43657)

Moves coordinate validation from Geometry constructors into parser.
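For illustration, a rough sketch of the resulting API (the wrapper class and the
sample WKT inputs are invented for demonstration; the geo types and messages are
taken from the diff below):

    import org.elasticsearch.geo.geometry.Geometry;
    import org.elasticsearch.geo.utils.GeographyValidator;
    import org.elasticsearch.geo.utils.GeometryValidator;
    import org.elasticsearch.geo.utils.WellKnownText;

    public class ValidatorSketch {
        public static void main(String[] args) throws Exception {
            // Default geography rules: lat in [-90, 90], lon in [-180, 180];
            // "true" means a Z value (altitude) is accepted rather than rejected.
            GeometryValidator geographic = new GeographyValidator(true);
            WellKnownText wkt = new WellKnownText(true, geographic);
            Geometry ok = wkt.fromWKT("point (20.0 10.0)"); // WKT order is lon lat
            System.out.println(ok);

            // Because validation now runs as a separate step after parsing,
            // custom rules can be plugged in; a no-op validator accepts
            // coordinates that the geographic validator would reject.
            GeometryValidator acceptAll = geometry -> {};
            System.out.println(new WellKnownText(true, acceptAll).fromWKT("point (10000 20000)"));
        }
    }

The parser builds the Geometry first and then hands it to GeometryValidator#validate,
which throws IllegalArgumentException (for example "invalid longitude 200.0; must be
between -180.0 and 180.0") for coordinates it rejects.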
Relates #43644 --- .../elasticsearch/geo/geometry/Circle.java | 2 - .../geo/geometry/GeometryUtils.java | 78 -------- .../org/elasticsearch/geo/geometry/Line.java | 4 - .../org/elasticsearch/geo/geometry/Point.java | 2 - .../elasticsearch/geo/geometry/Rectangle.java | 30 --- .../geo/utils/GeographyValidator.java | 178 ++++++++++++++++++ .../geo/utils/GeometryValidator.java | 34 ++++ .../geo/utils/WellKnownText.java | 15 +- .../geo/geometry/BaseGeometryTestCase.java | 3 +- .../geo/geometry/CircleTests.java | 11 +- .../geo/geometry/GeometryCollectionTests.java | 3 +- .../geo/geometry/GeometryValidatorTests.java | 127 +++++++++++++ .../elasticsearch/geo/geometry/LineTests.java | 16 +- .../geo/geometry/LinearRingTests.java | 19 +- .../geo/geometry/MultiLineTests.java | 3 +- .../geo/geometry/MultiPointTests.java | 3 +- .../geo/geometry/MultiPolygonTests.java | 3 +- .../geo/geometry/PointTests.java | 11 +- .../geo/geometry/PolygonTests.java | 10 +- .../geo/geometry/RectangleTests.java | 17 +- .../org/elasticsearch/common/geo/GeoJson.java | 23 ++- .../common/geo/GeometryParser.java | 9 +- .../common/geo/BaseGeoParsingTestCase.java | 3 +- .../common/geo/GeoDistanceTests.java | 17 -- .../common/geo/GeoJsonParserTests.java | 47 ++--- .../common/geo/GeoJsonSerializationTests.java | 3 +- .../common/geo/GeometryParserTests.java | 2 +- .../extractor/fields/ExtractedField.java | 3 +- .../xpack/sql/jdbc/TypeConverter.java | 3 +- .../xpack/sql/qa/jdbc/JdbcAssert.java | 3 +- .../function/scalar/geo/GeoShape.java | 6 +- 31 files changed, 472 insertions(+), 216 deletions(-) delete mode 100644 libs/geo/src/main/java/org/elasticsearch/geo/geometry/GeometryUtils.java create mode 100644 libs/geo/src/main/java/org/elasticsearch/geo/utils/GeographyValidator.java create mode 100644 libs/geo/src/main/java/org/elasticsearch/geo/utils/GeometryValidator.java create mode 100644 libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryValidatorTests.java diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Circle.java b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Circle.java index cb8e2c4cb33..ad9881ab72f 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Circle.java +++ b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Circle.java @@ -49,8 +49,6 @@ public class Circle implements Geometry { if (radiusMeters < 0 ) { throw new IllegalArgumentException("Circle radius [" + radiusMeters + "] cannot be negative"); } - GeometryUtils.checkLatitude(lat); - GeometryUtils.checkLongitude(lon); } @Override diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/GeometryUtils.java b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/GeometryUtils.java deleted file mode 100644 index c7bfa16b16a..00000000000 --- a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/GeometryUtils.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.geo.geometry; - -/** - * Geometry-related utility methods - */ -public final class GeometryUtils { - /** - * Minimum longitude value. - */ - static final double MIN_LON_INCL = -180.0D; - - /** - * Maximum longitude value. - */ - static final double MAX_LON_INCL = 180.0D; - - /** - * Minimum latitude value. - */ - static final double MIN_LAT_INCL = -90.0D; - - /** - * Maximum latitude value. - */ - static final double MAX_LAT_INCL = 90.0D; - - // No instance: - private GeometryUtils() { - } - - /** - * validates latitude value is within standard +/-90 coordinate bounds - */ - static void checkLatitude(double latitude) { - if (Double.isNaN(latitude) || latitude < MIN_LAT_INCL || latitude > MAX_LAT_INCL) { - throw new IllegalArgumentException( - "invalid latitude " + latitude + "; must be between " + MIN_LAT_INCL + " and " + MAX_LAT_INCL); - } - } - - /** - * validates longitude value is within standard +/-180 coordinate bounds - */ - static void checkLongitude(double longitude) { - if (Double.isNaN(longitude) || longitude < MIN_LON_INCL || longitude > MAX_LON_INCL) { - throw new IllegalArgumentException( - "invalid longitude " + longitude + "; must be between " + MIN_LON_INCL + " and " + MAX_LON_INCL); - } - } - - public static double checkAltitude(final boolean ignoreZValue, double zValue) { - if (ignoreZValue == false) { - throw new IllegalArgumentException("found Z value [" + zValue + "] but [ignore_z_value] " - + "parameter is [" + ignoreZValue + "]"); - } - return zValue; - } - -} diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Line.java b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Line.java index c2c9cb4b83a..20f43142469 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Line.java +++ b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Line.java @@ -59,10 +59,6 @@ public class Line implements Geometry { if (alts != null && alts.length != lats.length) { throw new IllegalArgumentException("alts and lats must be equal length"); } - for (int i = 0; i < lats.length; i++) { - GeometryUtils.checkLatitude(lats[i]); - GeometryUtils.checkLongitude(lons[i]); - } } public int length() { diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Point.java b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Point.java index 248f433b96a..88fd5eb06fe 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Point.java +++ b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Point.java @@ -42,8 +42,6 @@ public class Point implements Geometry { } public Point(double lat, double lon, double alt) { - GeometryUtils.checkLatitude(lat); - GeometryUtils.checkLongitude(lon); this.lat = lat; this.lon = lon; this.alt = alt; diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Rectangle.java b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Rectangle.java index ca7ec2e57c9..75ba25721e7 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Rectangle.java +++ b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Rectangle.java @@ -71,10 +71,6 @@ public 
class Rectangle implements Geometry { * Constructs a bounding box by first validating the provided latitude and longitude coordinates */ public Rectangle(double minLat, double maxLat, double minLon, double maxLon, double minAlt, double maxAlt) { - GeometryUtils.checkLatitude(minLat); - GeometryUtils.checkLatitude(maxLat); - GeometryUtils.checkLongitude(minLon); - GeometryUtils.checkLongitude(maxLon); this.minLon = minLon; this.maxLon = maxLon; this.minLat = minLat; @@ -90,17 +86,6 @@ public class Rectangle implements Geometry { } } - public double getWidth() { - if (crossesDateline()) { - return GeometryUtils.MAX_LON_INCL - minLon + maxLon - GeometryUtils.MIN_LON_INCL; - } - return maxLon - minLon; - } - - public double getHeight() { - return maxLat - minLat; - } - public double getMinLat() { return minLat; } @@ -156,21 +141,6 @@ public class Rectangle implements Geometry { return b.toString(); } - /** - * Returns true if this bounding box crosses the dateline - */ - public boolean crossesDateline() { - return maxLon < minLon; - } - - /** returns true if rectangle (defined by minLat, maxLat, minLon, maxLon) contains the lat lon point */ - public boolean containsPoint(final double lat, final double lon) { - if (lat >= minLat && lat <= maxLat) { - return crossesDateline() ? lon >= minLon || lon <= maxLon : lon >= minLon && lon <= maxLon; - } - return false; - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/utils/GeographyValidator.java b/libs/geo/src/main/java/org/elasticsearch/geo/utils/GeographyValidator.java new file mode 100644 index 00000000000..756792358ab --- /dev/null +++ b/libs/geo/src/main/java/org/elasticsearch/geo/utils/GeographyValidator.java @@ -0,0 +1,178 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.geo.utils; + +import org.elasticsearch.geo.geometry.Circle; +import org.elasticsearch.geo.geometry.Geometry; +import org.elasticsearch.geo.geometry.GeometryCollection; +import org.elasticsearch.geo.geometry.GeometryVisitor; +import org.elasticsearch.geo.geometry.Line; +import org.elasticsearch.geo.geometry.LinearRing; +import org.elasticsearch.geo.geometry.MultiLine; +import org.elasticsearch.geo.geometry.MultiPoint; +import org.elasticsearch.geo.geometry.MultiPolygon; +import org.elasticsearch.geo.geometry.Point; +import org.elasticsearch.geo.geometry.Polygon; +import org.elasticsearch.geo.geometry.Rectangle; + +/** + * Validator that checks that lats are between -90 and +90 and lons are between -180 and +180 and altitude is present only if + * ignoreZValue is set to true + */ +public class GeographyValidator implements GeometryValidator { + + /** + * Minimum longitude value. 
+     */
+    private static final double MIN_LON_INCL = -180.0D;
+
+    /**
+     * Maximum longitude value.
+     */
+    private static final double MAX_LON_INCL = 180.0D;
+
+    /**
+     * Minimum latitude value.
+     */
+    private static final double MIN_LAT_INCL = -90.0D;
+
+    /**
+     * Maximum latitude value.
+     */
+    private static final double MAX_LAT_INCL = 90.0D;
+
+    private final boolean ignoreZValue;
+
+    public GeographyValidator(boolean ignoreZValue) {
+        this.ignoreZValue = ignoreZValue;
+    }
+
+    /**
+     * validates latitude value is within standard +/-90 coordinate bounds
+     */
+    protected void checkLatitude(double latitude) {
+        if (Double.isNaN(latitude) || latitude < MIN_LAT_INCL || latitude > MAX_LAT_INCL) {
+            throw new IllegalArgumentException(
+                "invalid latitude " + latitude + "; must be between " + MIN_LAT_INCL + " and " + MAX_LAT_INCL);
+        }
+    }
+
+    /**
+     * validates longitude value is within standard +/-180 coordinate bounds
+     */
+    protected void checkLongitude(double longitude) {
+        if (Double.isNaN(longitude) || longitude < MIN_LON_INCL || longitude > MAX_LON_INCL) {
+            throw new IllegalArgumentException(
+                "invalid longitude " + longitude + "; must be between " + MIN_LON_INCL + " and " + MAX_LON_INCL);
+        }
+    }
+
+    protected void checkAltitude(double zValue) {
+        if (ignoreZValue == false && Double.isNaN(zValue) == false) {
+            throw new IllegalArgumentException("found Z value [" + zValue + "] but [ignore_z_value] "
+                + "parameter is [" + ignoreZValue + "]");
+        }
+    }
+
+    @Override
+    public void validate(Geometry geometry) {
+        geometry.visit(new GeometryVisitor<Void, RuntimeException>() {
+
+            @Override
+            public Void visit(Circle circle) throws RuntimeException {
+                checkLatitude(circle.getLat());
+                checkLongitude(circle.getLon());
+                checkAltitude(circle.getAlt());
+                return null;
+            }
+
+            @Override
+            public Void visit(GeometryCollection<?> collection) throws RuntimeException {
+                for (Geometry g : collection) {
+                    g.visit(this);
+                }
+                return null;
+            }
+
+            @Override
+            public Void visit(Line line) throws RuntimeException {
+                for (int i = 0; i < line.length(); i++) {
+                    checkLatitude(line.getLat(i));
+                    checkLongitude(line.getLon(i));
+                    checkAltitude(line.getAlt(i));
+                }
+                return null;
+            }
+
+            @Override
+            public Void visit(LinearRing ring) throws RuntimeException {
+                for (int i = 0; i < ring.length(); i++) {
+                    checkLatitude(ring.getLat(i));
+                    checkLongitude(ring.getLon(i));
+                    checkAltitude(ring.getAlt(i));
+                }
+                return null;
+            }
+
+            @Override
+            public Void visit(MultiLine multiLine) throws RuntimeException {
+                return visit((GeometryCollection<?>) multiLine);
+            }
+
+            @Override
+            public Void visit(MultiPoint multiPoint) throws RuntimeException {
+                return visit((GeometryCollection<?>) multiPoint);
+            }
+
+            @Override
+            public Void visit(MultiPolygon multiPolygon) throws RuntimeException {
+                return visit((GeometryCollection<?>) multiPolygon);
+            }
+
+            @Override
+            public Void visit(Point point) throws RuntimeException {
+                checkLatitude(point.getLat());
+                checkLongitude(point.getLon());
+                checkAltitude(point.getAlt());
+                return null;
+            }
+
+            @Override
+            public Void visit(Polygon polygon) throws RuntimeException {
+                polygon.getPolygon().visit(this);
+                for (int i = 0; i < polygon.getNumberOfHoles(); i++) {
+                    polygon.getHole(i).visit(this);
+                }
+                return null;
+            }
+
+            @Override
+            public Void visit(Rectangle rectangle) throws RuntimeException {
+                checkLatitude(rectangle.getMinLat());
+                checkLatitude(rectangle.getMaxLat());
+                checkLongitude(rectangle.getMinLon());
+                checkLongitude(rectangle.getMaxLon());
+                checkAltitude(rectangle.getMinAlt());
+                
checkAltitude(rectangle.getMaxAlt()); + return null; + } + }); + } +} diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/utils/GeometryValidator.java b/libs/geo/src/main/java/org/elasticsearch/geo/utils/GeometryValidator.java new file mode 100644 index 00000000000..2caf6738ed4 --- /dev/null +++ b/libs/geo/src/main/java/org/elasticsearch/geo/utils/GeometryValidator.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.geo.utils; + +import org.elasticsearch.geo.geometry.Geometry; + +/** + * Generic geometry validator that can be used by the parser to verify the validity of the parsed geometry + */ +public interface GeometryValidator { + + /** + * Validates the geometry and throws IllegalArgumentException if the geometry is not valid + */ + void validate(Geometry geometry); + +} diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/utils/WellKnownText.java b/libs/geo/src/main/java/org/elasticsearch/geo/utils/WellKnownText.java index 007bb036cec..4fd4bdb6fd1 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geo/utils/WellKnownText.java +++ b/libs/geo/src/main/java/org/elasticsearch/geo/utils/WellKnownText.java @@ -22,7 +22,6 @@ package org.elasticsearch.geo.utils; import org.elasticsearch.geo.geometry.Circle; import org.elasticsearch.geo.geometry.Geometry; import org.elasticsearch.geo.geometry.GeometryCollection; -import org.elasticsearch.geo.geometry.GeometryUtils; import org.elasticsearch.geo.geometry.GeometryVisitor; import org.elasticsearch.geo.geometry.Line; import org.elasticsearch.geo.geometry.LinearRing; @@ -58,11 +57,11 @@ public class WellKnownText { private final String EOL = "END-OF-LINE"; private final boolean coerce; - private final boolean ignoreZValue; + private final GeometryValidator validator; - public WellKnownText(boolean coerce, boolean ignoreZValue) { + public WellKnownText(boolean coerce, GeometryValidator validator) { this.coerce = coerce; - this.ignoreZValue = ignoreZValue; + this.validator = validator; } public String toWKT(Geometry geometry) { @@ -243,7 +242,9 @@ public class WellKnownText { tokenizer.whitespaceChars('\r', '\r'); tokenizer.whitespaceChars('\n', '\n'); tokenizer.commentChar('#'); - return parseGeometry(tokenizer); + Geometry geometry = parseGeometry(tokenizer); + validator.validate(geometry); + return geometry; } finally { reader.close(); } @@ -297,7 +298,7 @@ public class WellKnownText { double lat = nextNumber(stream); Point pt; if (isNumberNext(stream)) { - pt = new Point(lat, lon, GeometryUtils.checkAltitude(ignoreZValue, nextNumber(stream))); + pt = new Point(lat, lon, nextNumber(stream)); } else { pt = new Point(lat, lon); } @@ -318,7 +319,7 @@ public class WellKnownText { lons.add(nextNumber(stream)); lats.add(nextNumber(stream)); if 
(isNumberNext(stream)) { - alts.add(GeometryUtils.checkAltitude(ignoreZValue, nextNumber(stream))); + alts.add(nextNumber(stream)); } if (alts.isEmpty() == false && alts.size() != lons.size()) { throw new ParseException("coordinate dimensions do not match: " + tokenString(stream), stream.lineno()); diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/BaseGeometryTestCase.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/BaseGeometryTestCase.java index 47d0f4285ad..073bff4cb75 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/BaseGeometryTestCase.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/BaseGeometryTestCase.java @@ -22,6 +22,7 @@ package org.elasticsearch.geo.geometry; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.geo.utils.WellKnownText; import org.elasticsearch.test.AbstractWireTestCase; @@ -53,7 +54,7 @@ abstract class BaseGeometryTestCase extends AbstractWireTest @SuppressWarnings("unchecked") @Override protected T copyInstance(T instance, Version version) throws IOException { - WellKnownText wkt = new WellKnownText(true, true); + WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true)); String text = wkt.toWKT(instance); try { return (T) wkt.fromWKT(text); diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/CircleTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/CircleTests.java index 8bad65db616..e8912a39fb4 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/CircleTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/CircleTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.GeometryValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -36,7 +38,7 @@ public class CircleTests extends BaseGeometryTestCase { } public void testBasicSerialization() throws IOException, ParseException { - WellKnownText wkt = new WellKnownText(true, true); + WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true)); assertEquals("circle (20.0 10.0 15.0)", wkt.toWKT(new Circle(10, 20, 15))); assertEquals(new Circle(10, 20, 15), wkt.fromWKT("circle (20.0 10.0 15.0)")); @@ -48,13 +50,14 @@ public class CircleTests extends BaseGeometryTestCase { } public void testInitValidation() { - IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new Circle(10, 20, -1)); + GeometryValidator validator = new GeographyValidator(true); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> validator.validate(new Circle(10, 20, -1))); assertEquals("Circle radius [-1.0] cannot be negative", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new Circle(100, 20, 1)); + ex = expectThrows(IllegalArgumentException.class, () -> validator.validate(new Circle(100, 20, 1))); assertEquals("invalid latitude 100.0; must be between -90.0 and 90.0", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new Circle(10, 200, 1)); + ex = expectThrows(IllegalArgumentException.class, () -> validator.validate(new Circle(10, 200, 1))); assertEquals("invalid longitude 200.0; must be between -180.0 and 180.0", ex.getMessage()); } } diff --git 
a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryCollectionTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryCollectionTests.java index 905d0f3c125..c78c47dfbcd 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryCollectionTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryCollectionTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -35,7 +36,7 @@ public class GeometryCollectionTests extends BaseGeometryTestCase(Arrays.asList(new Point(10, 20), Point.EMPTY)))); diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryValidatorTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryValidatorTests.java new file mode 100644 index 00000000000..c747fc2df50 --- /dev/null +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryValidatorTests.java @@ -0,0 +1,127 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.geo.geometry; + +import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.GeometryValidator; +import org.elasticsearch.geo.utils.WellKnownText; +import org.elasticsearch.test.ESTestCase; + +public class GeometryValidatorTests extends ESTestCase { + + public static class NoopValidator implements GeometryValidator { + + @Override + public void validate(Geometry geometry) { + + } + } + + public static class OneValidator extends GeographyValidator { + /** + * Minimum longitude value. + */ + private static final double MIN_LON_INCL = -1D; + + /** + * Maximum longitude value. + */ + private static final double MAX_LON_INCL = 1D; + + /** + * Minimum latitude value. + */ + private static final double MIN_LAT_INCL = -1D; + + /** + * Maximum latitude value. + */ + private static final double MAX_LAT_INCL = 1D; + + /** + * Minimum altitude value. + */ + private static final double MIN_ALT_INCL = -1D; + + /** + * Maximum altitude value. 
+ */ + private static final double MAX_ALT_INCL = 1D; + + public OneValidator() { + super(true); + } + + @Override + protected void checkLatitude(double latitude) { + if (Double.isNaN(latitude) || latitude < MIN_LAT_INCL || latitude > MAX_LAT_INCL) { + throw new IllegalArgumentException( + "invalid latitude " + latitude + "; must be between " + MIN_LAT_INCL + " and " + MAX_LAT_INCL); + } + } + + @Override + protected void checkLongitude(double longitude) { + if (Double.isNaN(longitude) || longitude < MIN_LON_INCL || longitude > MAX_LON_INCL) { + throw new IllegalArgumentException( + "invalid longitude " + longitude + "; must be between " + MIN_LON_INCL + " and " + MAX_LON_INCL); + } + } + + @Override + protected void checkAltitude(double zValue) { + if (Double.isNaN(zValue) == false && (zValue < MIN_ALT_INCL || zValue > MAX_ALT_INCL)) { + throw new IllegalArgumentException( + "invalid altitude " + zValue + "; must be between " + MIN_ALT_INCL + " and " + MAX_ALT_INCL); + } + } + } + + public void testNoopValidator() throws Exception { + WellKnownText parser = new WellKnownText(true, new NoopValidator()); + parser.fromWKT("CIRCLE (10000 20000 30000)"); + parser.fromWKT("POINT (10000 20000)"); + parser.fromWKT("LINESTRING (10000 20000, 0 0)"); + parser.fromWKT("POLYGON ((300 100, 400 200, 500 300, 300 100), (50 150, 250 150, 200 100))"); + parser.fromWKT("MULTIPOINT (10000 20000, 20000 30000)"); + } + + public void testOneValidator() throws Exception { + WellKnownText parser = new WellKnownText(true, new OneValidator()); + parser.fromWKT("POINT (0 1)"); + parser.fromWKT("POINT (0 1 0.5)"); + IllegalArgumentException ex; + ex = expectThrows(IllegalArgumentException.class, () -> parser.fromWKT("CIRCLE (1 2 3)")); + assertEquals("invalid latitude 2.0; must be between -1.0 and 1.0", ex.getMessage()); + ex = expectThrows(IllegalArgumentException.class, () -> parser.fromWKT("POINT (2 1)")); + assertEquals("invalid longitude 2.0; must be between -1.0 and 1.0", ex.getMessage()); + ex = expectThrows(IllegalArgumentException.class, () -> parser.fromWKT("LINESTRING (1 -1 0, 0 0 2)")); + assertEquals("invalid altitude 2.0; must be between -1.0 and 1.0", ex.getMessage()); + ex = expectThrows(IllegalArgumentException.class, () -> parser.fromWKT("POLYGON ((0.3 0.1, 0.4 0.2, 5 0.3, 0.3 0.1))")); + assertEquals("invalid longitude 5.0; must be between -1.0 and 1.0", ex.getMessage()); + ex = expectThrows(IllegalArgumentException.class, () -> parser.fromWKT( + "POLYGON ((0.3 0.1, 0.4 0.2, 0.5 0.3, 0.3 0.1), (0.5 1.5, 2.5 1.5, 2.0 1.0))")); + assertEquals("invalid latitude 1.5; must be between -1.0 and 1.0", ex.getMessage()); + ex = expectThrows(IllegalArgumentException.class, () -> parser.fromWKT("MULTIPOINT (0 1, -2 1)")); + assertEquals("invalid longitude -2.0; must be between -1.0 and 1.0", ex.getMessage()); + } + + +} diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LineTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LineTests.java index 0f59940f973..b9f8cb37f54 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LineTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LineTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.GeometryValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -31,7 +33,7 @@ public class LineTests extends BaseGeometryTestCase { } public void testBasicSerialization() 
throws IOException, ParseException { - WellKnownText wkt = new WellKnownText(true, true); + WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true)); assertEquals("linestring (3.0 1.0, 4.0 2.0)", wkt.toWKT(new Line(new double[]{1, 2}, new double[]{3, 4}))); assertEquals(new Line(new double[]{1, 2}, new double[]{3, 4}), wkt.fromWKT("linestring (3 1, 4 2)")); @@ -45,19 +47,23 @@ public class LineTests extends BaseGeometryTestCase { } public void testInitValidation() { - IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new Line(new double[]{1}, new double[]{3})); + GeometryValidator validator = new GeographyValidator(true); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, + () -> validator.validate(new Line(new double[]{1}, new double[]{3}))); assertEquals("at least two points in the line is required", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new Line(new double[]{1, 2, 3, 1}, new double[]{3, 4, 500, 3})); + ex = expectThrows(IllegalArgumentException.class, + () -> validator.validate(new Line(new double[]{1, 2, 3, 1}, new double[]{3, 4, 500, 3}))); assertEquals("invalid longitude 500.0; must be between -180.0 and 180.0", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new Line(new double[]{1, 100, 3, 1}, new double[]{3, 4, 5, 3})); + ex = expectThrows(IllegalArgumentException.class, + () -> validator.validate(new Line(new double[]{1, 100, 3, 1}, new double[]{3, 4, 5, 3}))); assertEquals("invalid latitude 100.0; must be between -90.0 and 90.0", ex.getMessage()); } public void testWKTValidation() { IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, - () -> new WellKnownText(randomBoolean(), false).fromWKT("linestring (3 1 6, 4 2 5)")); + () -> new WellKnownText(randomBoolean(), new GeographyValidator(false)).fromWKT("linestring (3 1 6, 4 2 5)")); assertEquals("found Z value [6.0] but [ignore_z_value] parameter is [false]", ex.getMessage()); } } diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LinearRingTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LinearRingTests.java index 9327e2046d5..07e9e866233 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LinearRingTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LinearRingTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.GeometryValidator; import org.elasticsearch.geo.utils.WellKnownText; import org.elasticsearch.test.ESTestCase; @@ -26,30 +28,35 @@ public class LinearRingTests extends ESTestCase { public void testBasicSerialization() { UnsupportedOperationException ex = expectThrows(UnsupportedOperationException.class, - () -> new WellKnownText(true, true).toWKT(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3}))); + () -> new WellKnownText(true, new GeographyValidator(true)) + .toWKT(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3}))); assertEquals("line ring cannot be serialized using WKT", ex.getMessage()); } public void testInitValidation() { + GeometryValidator validator = new GeographyValidator(true); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, - () -> new LinearRing(new double[]{1, 2, 3}, new double[]{3, 4, 5})); + () -> validator.validate(new LinearRing(new double[]{1, 2, 3}, new double[]{3, 4, 5}))); assertEquals("first and last 
points of the linear ring must be the same (it must close itself): lats[0]=1.0 lats[2]=3.0 " + "lons[0]=3.0 lons[2]=5.0", ex.getMessage()); ex = expectThrows(IllegalArgumentException.class, - () -> new LinearRing(new double[]{1, 2, 1}, new double[]{3, 4, 3}, new double[]{1, 2, 3})); + () -> validator.validate(new LinearRing(new double[]{1, 2, 1}, new double[]{3, 4, 3}, new double[]{1, 2, 3}))); assertEquals("first and last points of the linear ring must be the same (it must close itself): lats[0]=1.0 lats[2]=1.0 " + "lons[0]=3.0 lons[2]=3.0 alts[0]=1.0 alts[2]=3.0", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new LinearRing(new double[]{1}, new double[]{3})); + ex = expectThrows(IllegalArgumentException.class, + () -> validator.validate(new LinearRing(new double[]{1}, new double[]{3}))); assertEquals("at least two points in the line is required", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 500, 3})); + ex = expectThrows(IllegalArgumentException.class, + () -> validator.validate(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 500, 3}))); assertEquals("invalid longitude 500.0; must be between -180.0 and 180.0", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new LinearRing(new double[]{1, 100, 3, 1}, new double[]{3, 4, 5, 3})); + ex = expectThrows(IllegalArgumentException.class, + () -> validator.validate(new LinearRing(new double[]{1, 100, 3, 1}, new double[]{3, 4, 5, 3}))); assertEquals("invalid latitude 100.0; must be between -90.0 and 90.0", ex.getMessage()); } diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiLineTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiLineTests.java index 22e0c4459a3..9ed782e65cc 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiLineTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiLineTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -40,7 +41,7 @@ public class MultiLineTests extends BaseGeometryTestCase { } public void testBasicSerialization() throws IOException, ParseException { - WellKnownText wkt = new WellKnownText(true, true); + WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true)); assertEquals("multilinestring ((3.0 1.0, 4.0 2.0))", wkt.toWKT( new MultiLine(Collections.singletonList(new Line(new double[]{1, 2}, new double[]{3, 4}))))); assertEquals(new MultiLine(Collections.singletonList(new Line(new double[]{1, 2}, new double[]{3, 4}))), diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPointTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPointTests.java index d3f8b5738cb..c170adf9c94 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPointTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPointTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -41,7 +42,7 @@ public class MultiPointTests extends BaseGeometryTestCase { } public void testBasicSerialization() throws IOException, ParseException { - WellKnownText wkt = new WellKnownText(true, true); + WellKnownText wkt = new 
WellKnownText(true, new GeographyValidator(true)); assertEquals("multipoint (2.0 1.0)", wkt.toWKT( new MultiPoint(Collections.singletonList(new Point(1, 2))))); assertEquals(new MultiPoint(Collections.singletonList(new Point(1 ,2))), diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPolygonTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPolygonTests.java index fb4d8821ac3..9918dfa546c 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPolygonTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPolygonTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -40,7 +41,7 @@ public class MultiPolygonTests extends BaseGeometryTestCase { } public void testBasicSerialization() throws IOException, ParseException { - WellKnownText wkt = new WellKnownText(true, true); + WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true)); assertEquals("multipolygon (((3.0 1.0, 4.0 2.0, 5.0 3.0, 3.0 1.0)))", wkt.toWKT(new MultiPolygon(Collections.singletonList( new Polygon(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3})))))); diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PointTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PointTests.java index 4b590a3beb5..82e8fc40e75 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PointTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PointTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.GeometryValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -31,7 +33,7 @@ public class PointTests extends BaseGeometryTestCase { } public void testBasicSerialization() throws IOException, ParseException { - WellKnownText wkt = new WellKnownText(true, true); + WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true)); assertEquals("point (20.0 10.0)", wkt.toWKT(new Point(10, 20))); assertEquals(new Point(10, 20), wkt.fromWKT("point (20.0 10.0)")); @@ -43,16 +45,17 @@ public class PointTests extends BaseGeometryTestCase { } public void testInitValidation() { - IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new Point(100, 10)); + GeometryValidator validator = new GeographyValidator(true); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> validator.validate(new Point(100, 10))); assertEquals("invalid latitude 100.0; must be between -90.0 and 90.0", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new Point(10, 500)); + ex = expectThrows(IllegalArgumentException.class, () -> validator.validate(new Point(10, 500))); assertEquals("invalid longitude 500.0; must be between -180.0 and 180.0", ex.getMessage()); } public void testWKTValidation() { IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, - () -> new WellKnownText(randomBoolean(), false).fromWKT("point (20.0 10.0 100.0)")); + () -> new WellKnownText(randomBoolean(), new GeographyValidator(false)).fromWKT("point (20.0 10.0 100.0)")); assertEquals("found Z value [100.0] but [ignore_z_value] parameter is [false]", ex.getMessage()); } } diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PolygonTests.java 
b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PolygonTests.java index 33a5325c87b..adbe1f38cdc 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PolygonTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PolygonTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -32,7 +33,7 @@ public class PolygonTests extends BaseGeometryTestCase { } public void testBasicSerialization() throws IOException, ParseException { - WellKnownText wkt = new WellKnownText(true, true); + WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true)); assertEquals("polygon ((3.0 1.0, 4.0 2.0, 5.0 3.0, 3.0 1.0))", wkt.toWKT(new Polygon(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3})))); assertEquals(new Polygon(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3})), @@ -73,16 +74,17 @@ public class PolygonTests extends BaseGeometryTestCase { public void testWKTValidation() { IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, - () -> new WellKnownText(false, true).fromWKT("polygon ((3 1 5, 4 2 4, 5 3 3))")); + () -> new WellKnownText(false, new GeographyValidator(true)).fromWKT("polygon ((3 1 5, 4 2 4, 5 3 3))")); assertEquals("first and last points of the linear ring must be the same (it must close itself): " + "lats[0]=1.0 lats[2]=3.0 lons[0]=3.0 lons[2]=5.0 alts[0]=5.0 alts[2]=3.0", ex.getMessage()); ex = expectThrows(IllegalArgumentException.class, - () -> new WellKnownText(randomBoolean(), false).fromWKT("polygon ((3 1 5, 4 2 4, 5 3 3, 3 1 5))")); + () -> new WellKnownText(randomBoolean(), new GeographyValidator(false)).fromWKT("polygon ((3 1 5, 4 2 4, 5 3 3, 3 1 5))")); assertEquals("found Z value [5.0] but [ignore_z_value] parameter is [false]", ex.getMessage()); ex = expectThrows(IllegalArgumentException.class, - () -> new WellKnownText(false, randomBoolean()).fromWKT("polygon ((3 1, 4 2, 5 3, 3 1), (0.5 1.5, 2.5 1.5, 2.0 1.0))")); + () -> new WellKnownText(false, new GeographyValidator(randomBoolean())).fromWKT( + "polygon ((3 1, 4 2, 5 3, 3 1), (0.5 1.5, 2.5 1.5, 2.0 1.0))")); assertEquals("first and last points of the linear ring must be the same (it must close itself): " + "lats[0]=1.5 lats[2]=1.0 lons[0]=0.5 lons[2]=2.0", ex.getMessage()); } diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/RectangleTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/RectangleTests.java index afbf9f1ae8a..8bd1494eb34 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/RectangleTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/RectangleTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.GeometryValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -32,7 +34,7 @@ public class RectangleTests extends BaseGeometryTestCase { } public void testBasicSerialization() throws IOException, ParseException { - WellKnownText wkt = new WellKnownText(true, true); + WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true)); assertEquals("bbox (10.0, 20.0, 40.0, 30.0)", wkt.toWKT(new Rectangle(30, 40, 10, 20))); assertEquals(new Rectangle(30, 40, 10, 20), wkt.fromWKT("bbox (10.0, 20.0, 40.0, 30.0)")); @@ -41,16 +43,21 @@ public class RectangleTests 
extends BaseGeometryTestCase { } public void testInitValidation() { - IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new Rectangle(100, 1, 2, 3)); + GeometryValidator validator = new GeographyValidator(true); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, + () -> validator.validate(new Rectangle(1, 100, 2, 3))); assertEquals("invalid latitude 100.0; must be between -90.0 and 90.0", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new Rectangle(1, 2, 200, 3)); + ex = expectThrows(IllegalArgumentException.class, + () -> validator.validate(new Rectangle(1, 2, 200, 3))); assertEquals("invalid longitude 200.0; must be between -180.0 and 180.0", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new Rectangle(2, 1, 2, 3)); + ex = expectThrows(IllegalArgumentException.class, + () -> validator.validate(new Rectangle(2, 1, 2, 3))); assertEquals("max lat cannot be less than min lat", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new Rectangle(1, 2, 2, 3, 5, Double.NaN)); + ex = expectThrows(IllegalArgumentException.class, + () -> validator.validate(new Rectangle(1, 2, 2, 3, 5, Double.NaN))); assertEquals("only one altitude value is specified", ex.getMessage()); } } diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoJson.java b/server/src/main/java/org/elasticsearch/common/geo/GeoJson.java index 77722310613..4508d389358 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoJson.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoJson.java @@ -44,6 +44,7 @@ import org.elasticsearch.geo.geometry.Point; import org.elasticsearch.geo.geometry.Polygon; import org.elasticsearch.geo.geometry.Rectangle; import org.elasticsearch.geo.geometry.ShapeType; +import org.elasticsearch.geo.utils.GeometryValidator; import java.io.IOException; import java.util.ArrayList; @@ -66,18 +67,20 @@ public final class GeoJson { private final boolean rightOrientation; private final boolean coerce; - private final boolean ignoreZValue; + private final GeometryValidator validator; - public GeoJson(boolean rightOrientation, boolean coerce, boolean ignoreZValue) { + public GeoJson(boolean rightOrientation, boolean coerce, GeometryValidator validator) { this.rightOrientation = rightOrientation; this.coerce = coerce; - this.ignoreZValue = ignoreZValue; + this.validator = validator; } public Geometry fromXContent(XContentParser parser) throws IOException { try (XContentSubParser subParser = new XContentSubParser(parser)) { - return PARSER.apply(subParser, this); + Geometry geometry = PARSER.apply(subParser, this); + validator.validate(geometry); + return geometry; } } @@ -215,7 +218,7 @@ public final class GeoJson { static { PARSER.declareString(constructorArg(), FIELD_TYPE); - PARSER.declareField(optionalConstructorArg(), (p, c) -> parseCoordinates(p, c.ignoreZValue), FIELD_COORDINATES, + PARSER.declareField(optionalConstructorArg(), (p, c) -> parseCoordinates(p), FIELD_COORDINATES, ObjectParser.ValueType.VALUE_ARRAY); PARSER.declareObjectArray(optionalConstructorArg(), PARSER, FIELD_GEOMETRIES); PARSER.declareString(optionalConstructorArg(), FIELD_ORIENTATION); @@ -298,20 +301,20 @@ public final class GeoJson { * Recursive method which parses the arrays of coordinates used to define * Shapes */ - private static CoordinateNode parseCoordinates(XContentParser parser, boolean ignoreZValue) throws IOException { + private static CoordinateNode 
parseCoordinates(XContentParser parser) throws IOException {
         XContentParser.Token token = parser.nextToken();
         // Base cases
         if (token != XContentParser.Token.START_ARRAY &&
             token != XContentParser.Token.END_ARRAY &&
             token != XContentParser.Token.VALUE_NULL) {
-            return new CoordinateNode(parseCoordinate(parser, ignoreZValue));
+            return new CoordinateNode(parseCoordinate(parser));
         } else if (token == XContentParser.Token.VALUE_NULL) {
             throw new IllegalArgumentException("coordinates cannot contain NULL values)");
         }
 
         List<CoordinateNode> nodes = new ArrayList<>();
         while (token != XContentParser.Token.END_ARRAY) {
-            CoordinateNode node = parseCoordinates(parser, ignoreZValue);
+            CoordinateNode node = parseCoordinates(parser);
             if (nodes.isEmpty() == false && nodes.get(0).numDimensions() != node.numDimensions()) {
                 throw new ElasticsearchParseException("Exception parsing coordinates: number of dimensions do not match");
             }
@@ -325,7 +328,7 @@ public final class GeoJson {
 
     /**
      * Parses a single set of 2 or 3 coordinates
      */
-    private static Point parseCoordinate(XContentParser parser, boolean ignoreZValue) throws IOException {
+    private static Point parseCoordinate(XContentParser parser) throws IOException {
         // Add support for coerce here
         if (parser.currentToken() != XContentParser.Token.VALUE_NUMBER) {
             throw new ElasticsearchParseException("geo coordinates must be numbers");
@@ -339,7 +342,7 @@ public final class GeoJson {
         // alt (for storing purposes only - future use includes 3d shapes)
         double alt = Double.NaN;
         if (token == XContentParser.Token.VALUE_NUMBER) {
-            alt = GeoPoint.assertZValue(ignoreZValue, parser.doubleValue());
+            alt = parser.doubleValue();
             parser.nextToken();
         }
         // do not support > 3 dimensions
diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeometryParser.java b/server/src/main/java/org/elasticsearch/common/geo/GeometryParser.java
index fe06c3a9c33..e58372d8255 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/GeometryParser.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/GeometryParser.java
@@ -22,6 +22,8 @@ package org.elasticsearch.common.geo;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.geo.geometry.Geometry;
+import org.elasticsearch.geo.utils.GeographyValidator;
+import org.elasticsearch.geo.utils.GeometryValidator;
 import org.elasticsearch.geo.utils.WellKnownText;
 
 import java.io.IOException;
@@ -34,10 +36,12 @@ public final class GeometryParser {
 
     private final GeoJson geoJsonParser;
     private final WellKnownText wellKnownTextParser;
+    private final GeometryValidator validator;
 
     public GeometryParser(boolean rightOrientation, boolean coerce, boolean ignoreZValue) {
-        geoJsonParser = new GeoJson(rightOrientation, coerce, ignoreZValue);
-        wellKnownTextParser = new WellKnownText(coerce, ignoreZValue);
+        validator = new GeographyValidator(ignoreZValue);
+        geoJsonParser = new GeoJson(rightOrientation, coerce, validator);
+        wellKnownTextParser = new WellKnownText(coerce, validator);
     }
 
     /**
@@ -50,7 +54,6 @@ public final class GeometryParser {
         } else if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
             return geoJsonParser.fromXContent(parser);
         } else if (parser.currentToken() == XContentParser.Token.VALUE_STRING) {
-            // TODO: Add support for ignoreZValue and coerce to WKT
             return wellKnownTextParser.fromWKT(parser.text());
         }
         throw new ElasticsearchParseException("shape must be an object consisting of type and coordinates");
diff --git 
a/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java b/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java index d19f7934c65..9548d14cca9 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java @@ -21,6 +21,7 @@ package org.elasticsearch.common.geo; import org.elasticsearch.common.geo.parsers.ShapeParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions; import org.locationtech.jts.geom.Geometry; @@ -70,7 +71,7 @@ abstract class BaseGeoParsingTestCase extends ESTestCase { protected void assertGeometryEquals(org.elasticsearch.geo.geometry.Geometry expected, XContentBuilder geoJson) throws IOException { try (XContentParser parser = createParser(geoJson)) { parser.nextToken(); - assertEquals(expected, new GeoJson(true, false, false).fromXContent(parser)); + assertEquals(expected, new GeoJson(true, false, new GeographyValidator(false)).fromXContent(parser)); } } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoDistanceTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoDistanceTests.java index 7442d3c8d80..dfc01e4c64e 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoDistanceTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoDistanceTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.common.geo; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.unit.DistanceUnit; -import org.elasticsearch.geo.geometry.Rectangle; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -67,22 +66,6 @@ public class GeoDistanceTests extends ESTestCase { } } - public void testDistanceCheck() { - // Note, is within is an approximation, so, even though 0.52 is outside 50mi, we still get "true" - double radius = DistanceUnit.convert(50, DistanceUnit.MILES, DistanceUnit.METERS); - org.apache.lucene.geo.Rectangle r = org.apache.lucene.geo.Rectangle.fromPointDistance(0, 0, radius); - Rectangle box = new Rectangle(r.minLat, r.maxLat, r.minLon, r.maxLon); - assertThat(box.containsPoint(0.5, 0.5), equalTo(true)); - assertThat(box.containsPoint(0.52, 0.52), equalTo(true)); - assertThat(box.containsPoint(1, 1), equalTo(false)); - - radius = DistanceUnit.convert(200, DistanceUnit.MILES, DistanceUnit.METERS); - r = org.apache.lucene.geo.Rectangle.fromPointDistance(0, 179, radius); - box = new Rectangle(r.minLat, r.maxLat, r.minLon, r.maxLon); - assertThat(box.containsPoint(0, -179), equalTo(true)); - assertThat(box.containsPoint(0, -178), equalTo(false)); - } - private static double arcDistance(GeoPoint p1, GeoPoint p2) { return GeoDistance.ARC.calculate(p1.lat(), p1.lon(), p2.lat(), p2.lon(), DistanceUnit.METERS); } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonParserTests.java index e095c7e381a..4146adb2d29 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonParserTests.java @@ -36,6 +36,7 @@ import org.elasticsearch.geo.geometry.MultiPolygon; import 
org.elasticsearch.geo.geometry.Point; import org.elasticsearch.geo.geometry.Polygon; import org.elasticsearch.geo.geometry.Rectangle; +import org.elasticsearch.geo.utils.GeographyValidator; import java.io.IOException; import java.util.Arrays; @@ -72,7 +73,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { Line expected = new Line(new double[] {0.0, 1.0}, new double[] { 100.0, 101.0}); try (XContentParser parser = createParser(lineGeoJson)) { parser.nextToken(); - assertEquals(expected, new GeoJson(false, false, true).fromXContent(parser)); + assertEquals(expected, new GeoJson(false, false, new GeographyValidator(true)).fromXContent(parser)); } } @@ -124,7 +125,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { try (XContentParser parser = createParser(pointGeoJson)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeoJson(false, false, false).fromXContent(parser)); + expectThrows(XContentParseException.class, () -> new GeoJson(false, false, new GeographyValidator(false)).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -140,7 +141,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { try (XContentParser parser = createParser(lineGeoJson)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeoJson(false, false, false).fromXContent(parser)); + expectThrows(XContentParseException.class, () -> new GeoJson(false, false, new GeographyValidator(false)).fromXContent(parser)); assertNull(parser.nextToken()); } } @@ -178,7 +179,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { .endObject(); try (XContentParser parser = createParser(multilinesGeoJson)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeoJson(false, false, false).fromXContent(parser)); + expectThrows(XContentParseException.class, () -> new GeoJson(false, false, new GeographyValidator(false)).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -189,7 +190,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { .endObject(); try (XContentParser parser = createParser(multilinesGeoJson)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeoJson(false, false, false).fromXContent(parser)); + expectThrows(XContentParseException.class, () -> new GeoJson(false, false, new GeographyValidator(false)).fromXContent(parser)); assertNull(parser.nextToken()); } } @@ -239,7 +240,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { )); try (XContentParser parser = createParser(polygonGeoJson)) { parser.nextToken(); - assertEquals(expected, new GeoJson(true, false, true).fromXContent(parser)); + assertEquals(expected, new GeoJson(true, false, new GeographyValidator(true)).fromXContent(parser)); } } @@ -259,7 +260,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { .endObject(); try (XContentParser parser = createParser(polygonGeoJson)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeoJson(true, false, true).fromXContent(parser)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, new GeographyValidator(true)).fromXContent(parser)); assertNull(parser.nextToken()); } } @@ -275,7 +276,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { .endObject(); try (XContentParser parser = createParser(invalidPoint1)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); + 
expectThrows(XContentParseException.class, () -> new GeoJson(true, false, new GeographyValidator(false)).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -288,7 +289,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { .endObject(); try (XContentParser parser = createParser(invalidPoint2)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, new GeographyValidator(false)).fromXContent(parser)); assertNull(parser.nextToken()); } } @@ -302,7 +303,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { .endObject(); try (XContentParser parser = createParser(invalidMultipoint1)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, new GeographyValidator(false)).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -315,7 +316,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { .endObject(); try (XContentParser parser = createParser(invalidMultipoint2)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, new GeographyValidator(false)).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -329,7 +330,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { .endObject(); try (XContentParser parser = createParser(invalidMultipoint3)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, new GeographyValidator(false)).fromXContent(parser)); assertNull(parser.nextToken()); } } @@ -370,7 +371,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { try (XContentParser parser = createParser(JsonXContent.jsonXContent, multiPolygonGeoJson)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, new GeographyValidator(false)).fromXContent(parser)); assertNull(parser.nextToken()); } } @@ -391,7 +392,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { .endObject()); try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, new GeographyValidator(false)).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -406,7 +407,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, new GeographyValidator(false)).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -421,7 +422,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { 
parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, new GeographyValidator(false)).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -436,7 +437,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, new GeographyValidator(false)).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -449,7 +450,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, new GeographyValidator(false)).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -460,7 +461,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, new GeographyValidator(false)).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -473,7 +474,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, new GeographyValidator(false)).fromXContent(parser)); assertNull(parser.nextToken()); } } @@ -710,7 +711,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { try (XContentParser parser = createParser(tooLittlePointGeoJson)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, new GeographyValidator(false)).fromXContent(parser)); assertNull(parser.nextToken()); } @@ -723,7 +724,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { try (XContentParser parser = createParser(emptyPointGeoJson)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, new GeographyValidator(false)).fromXContent(parser)); assertNull(parser.nextToken()); } } @@ -749,7 +750,7 @@ public class GeoJsonParserTests extends BaseGeoParsingTestCase { parser.nextToken(); // foo parser.nextToken(); // start object parser.nextToken(); // start object - expectThrows(XContentParseException.class, () -> new GeoJson(true, false, false).fromXContent(parser)); + expectThrows(XContentParseException.class, () -> new GeoJson(true, false, new GeographyValidator(false)).fromXContent(parser)); assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); // end of the document 
assertNull(parser.nextToken()); // no more elements afterwards } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonSerializationTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonSerializationTests.java index b0ee969119c..46766b4e11f 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonSerializationTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.geo.geometry.MultiPolygon; import org.elasticsearch.geo.geometry.Point; import org.elasticsearch.geo.geometry.Polygon; import org.elasticsearch.geo.geometry.Rectangle; +import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.test.AbstractXContentTestCase; import org.elasticsearch.test.ESTestCase; @@ -49,7 +50,7 @@ public class GeoJsonSerializationTests extends ESTestCase { private static class GeometryWrapper implements ToXContentObject { private Geometry geometry; - private static GeoJson PARSER = new GeoJson(true, false, true); + private static GeoJson PARSER = new GeoJson(true, false, new GeographyValidator(true)); GeometryWrapper(Geometry geometry) { this.geometry = geometry; diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeometryParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeometryParserTests.java index 0d2b182741f..13b3f8f67b3 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeometryParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeometryParserTests.java @@ -61,7 +61,7 @@ public class GeometryParserTests extends ESTestCase { try (XContentParser parser = createParser(pointGeoJsonWithZ)) { parser.nextToken(); - expectThrows(XContentParseException.class, () -> new GeometryParser(true, randomBoolean(), false).parse(parser)); + expectThrows(IllegalArgumentException.class, () -> new GeometryParser(true, randomBoolean(), false).parse(parser)); } XContentBuilder polygonGeoJson = XContentFactory.jsonBuilder() diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java index 5fd8f70e369..e2976951524 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.geo.geometry.Geometry; import org.elasticsearch.geo.geometry.Point; import org.elasticsearch.geo.geometry.ShapeType; +import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.geo.utils.WellKnownText; import org.elasticsearch.search.SearchHit; @@ -126,7 +127,7 @@ public abstract class ExtractedField { } private static class GeoShapeField extends FromSource { - private static final WellKnownText wkt = new WellKnownText(true, true); + private static final WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true)); GeoShapeField(String alias, String name) { super(alias, name); diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java index bb66a104189..0a1c0826695 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java 
+++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.sql.jdbc; +import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.geo.utils.WellKnownText; import org.elasticsearch.xpack.sql.proto.StringUtils; @@ -54,7 +55,7 @@ import static org.elasticsearch.xpack.sql.jdbc.JdbcDateUtils.timeAsTime; */ final class TypeConverter { - private static WellKnownText WKT = new WellKnownText(true, true); + private static WellKnownText WKT = new WellKnownText(true, new GeographyValidator(true)); private TypeConverter() {} diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java index 47a14e180fd..256d7cb612c 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java @@ -10,6 +10,7 @@ import com.carrotsearch.hppc.IntObjectHashMap; import org.apache.logging.log4j.Logger; import org.elasticsearch.geo.geometry.Geometry; import org.elasticsearch.geo.geometry.Point; +import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.geo.utils.WellKnownText; import org.elasticsearch.xpack.sql.jdbc.EsType; import org.elasticsearch.xpack.sql.proto.StringUtils; @@ -51,7 +52,7 @@ public class JdbcAssert { private static final IntObjectHashMap SQL_TO_TYPE = new IntObjectHashMap<>(); - private static final WellKnownText WKT = new WellKnownText(true, true); + private static final WellKnownText WKT = new WellKnownText(true, new GeographyValidator(true)); static { for (EsType type : EsType.values()) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java index f9a025ea4f0..1c3d1e7c935 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java @@ -29,6 +29,8 @@ import org.elasticsearch.geo.geometry.MultiPolygon; import org.elasticsearch.geo.geometry.Point; import org.elasticsearch.geo.geometry.Polygon; import org.elasticsearch.geo.geometry.Rectangle; +import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.GeometryValidator; import org.elasticsearch.geo.utils.WellKnownText; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; @@ -49,9 +51,11 @@ public class GeoShape implements ToXContentFragment, NamedWriteable { private final Geometry shape; + private static final GeometryValidator validator = new GeographyValidator(true); + private static final GeometryParser GEOMETRY_PARSER = new GeometryParser(true, true, true); - private static final WellKnownText WKT_PARSER = new WellKnownText(true, true); + private static final WellKnownText WKT_PARSER = new WellKnownText(true, validator); public GeoShape(double lon, double lat) { shape = new Point(lat, lon); From 34a86cc321c6841a89fcf74ce26c7cc78b627896 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Thu, 27 Jun 2019 20:42:43 -0500 Subject: [PATCH 18/42] [ML] Allowing stopped status in HLRC testStartStop (#43710) (#43719) --- .../elasticsearch/client/DataFrameTransformIT.java | 12 ++++++++++-- 1 file changed, 10 
insertions(+), 2 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java index 45c3038b662..5ec2265d045 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java @@ -258,8 +258,10 @@ public class DataFrameTransformIT extends ESRestHighLevelClientTestCase { GetDataFrameTransformStatsResponse statsResponse = execute(new GetDataFrameTransformStatsRequest(id), client::getDataFrameTransformStats, client::getDataFrameTransformStatsAsync); assertThat(statsResponse.getTransformsStateAndStats(), hasSize(1)); - IndexerState indexerState = statsResponse.getTransformsStateAndStats().get(0).getTransformState().getIndexerState(); - assertThat(indexerState, is(oneOf(IndexerState.STARTED, IndexerState.INDEXING))); + DataFrameTransformTaskState taskState = statsResponse.getTransformsStateAndStats().get(0).getTransformState().getTaskState(); + + // Since we are non-continuous, the transform could auto-stop between being started earlier and us gathering the statistics + assertThat(taskState, is(oneOf(DataFrameTransformTaskState.STARTED, DataFrameTransformTaskState.STOPPED))); StopDataFrameTransformRequest stopRequest = new StopDataFrameTransformRequest(id, Boolean.TRUE, null); StopDataFrameTransformResponse stopResponse = @@ -267,6 +269,12 @@ public class DataFrameTransformIT extends ESRestHighLevelClientTestCase { assertTrue(stopResponse.isAcknowledged()); assertThat(stopResponse.getNodeFailures(), empty()); assertThat(stopResponse.getTaskFailures(), empty()); + + // Calling stop with wait_for_completion assures that we will be in the `STOPPED` state for the transform task + statsResponse = execute(new GetDataFrameTransformStatsRequest(id), + client::getDataFrameTransformStats, client::getDataFrameTransformStatsAsync); + taskState = statsResponse.getTransformsStateAndStats().get(0).getTransformState().getTaskState(); + assertThat(taskState, is(DataFrameTransformTaskState.STOPPED)); } public void testPreview() throws IOException { From 5b4089e57e5a50871512afb7b42adb6a0354965f Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 27 Jun 2019 18:45:14 -0700 Subject: [PATCH 19/42] Remove nodeId from BaseNodeRequest (#43658) TransportNodesAction provides a mechanism to easily broadcast a request to many nodes, and collect the responses into a high-level response. Each node has its own request type, with a base class of BaseNodeRequest. This base request requires passing the nodeId to which the request will be sent. However, that nodeId is not used anywhere. It is private to the base class, yet serialized to each node, where the node could just as easily find the nodeId of the node it is on locally. This commit removes passing the nodeId through to the node request creation, and guards its serialization so that we can remove the base request class altogether in the future.
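As a minimal before/after sketch of the resulting API, mirroring the SqlStatsRequest hunk further below (constructor bodies abbreviated; the assumption is that a node which still needs its own id can resolve it locally, for example via clusterService.localNode().getId(), rather than having it serialized to it):

    // before this change: a concrete node request had to thread the target nodeId
    // through, only for it to be stored privately, serialized, and never read
    class NodeStatsRequest extends BaseNodeRequest {
        private boolean includeStats;
        NodeStatsRequest(SqlStatsRequest request, String nodeId) {
            super(nodeId);
            includeStats = request.includeStats();
        }
    }

    // after: the per-node request no longer carries a nodeId at all
    class NodeStatsRequest extends BaseNodeRequest {
        private boolean includeStats;
        NodeStatsRequest(SqlStatsRequest request) {
            includeStats = request.includeStats();
        }
    }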
--- .../TransportNodesHotThreadsAction.java | 7 +++--- .../node/info/TransportNodesInfoAction.java | 7 +++--- ...nsportNodesReloadSecureSettingsAction.java | 7 +++--- .../node/stats/TransportNodesStatsAction.java | 7 +++--- .../node/usage/TransportNodesUsageAction.java | 7 +++--- .../status/TransportNodesSnapshotsStatus.java | 7 +++--- .../stats/TransportClusterStatsAction.java | 7 +++--- .../action/support/nodes/BaseNodeRequest.java | 20 ++++++++-------- .../support/nodes/TransportNodesAction.java | 4 ++-- .../TransportNodesListGatewayMetaState.java | 12 ++-------- ...ransportNodesListGatewayStartedShards.java | 7 +++--- .../TransportNodesListShardStoreMetaData.java | 7 +++--- .../node/tasks/CancellableTasksTests.java | 23 ++++++++----------- .../cluster/node/tasks/TestTaskPlugin.java | 13 ++++------- .../node/tasks/TransportTasksActionTests.java | 7 +++--- .../nodes/TransportNodesActionTests.java | 2 +- .../NodesDeprecationCheckAction.java | 3 +-- .../action/realm/ClearRealmCacheRequest.java | 3 +-- .../action/role/ClearRolesCacheRequest.java | 3 +-- .../actions/stats/WatcherStatsRequest.java | 3 +-- .../TransportNodeDeprecationCheckAction.java | 4 ++-- .../realm/TransportClearRealmCacheAction.java | 4 ++-- .../role/TransportClearRolesCacheAction.java | 4 ++-- .../xpack/sql/plugin/SqlStatsRequest.java | 3 +-- .../sql/plugin/TransportSqlStatsAction.java | 4 ++-- .../stats/TransportWatcherStatsAction.java | 4 ++-- .../TransportWatcherStatsActionTests.java | 4 ++-- 27 files changed, 74 insertions(+), 109 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java index 6321813f189..c1a5a8c431e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java @@ -54,8 +54,8 @@ public class TransportNodesHotThreadsAction extends TransportNodesAction responses, List failures); - protected abstract NodeRequest newNodeRequest(String nodeId, NodesRequest request); + protected abstract NodeRequest newNodeRequest(NodesRequest request); protected abstract NodeResponse newNodeResponse(); @@ -174,7 +174,7 @@ public abstract class TransportNodesAction { public Node() {} - public Node(WatcherStatsRequest request, String nodeId) { - super(nodeId); + public Node(WatcherStatsRequest request) { includeCurrentWatches = request.includeCurrentWatches(); includeQueuedWatches = request.includeQueuedWatches(); includeStats = request.includeStats(); diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckAction.java index a315559a2f9..964aae63359 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportNodeDeprecationCheckAction.java @@ -52,8 +52,8 @@ public class TransportNodeDeprecationCheckAction extends TransportNodesAction { NodeStatsRequest() {} - NodeStatsRequest(SqlStatsRequest request, String nodeId) { - super(nodeId); + NodeStatsRequest(SqlStatsRequest request) { includeStats = request.includeStats(); } 
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlStatsAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlStatsAction.java index 815e45175e9..cdd29d11424 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlStatsAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlStatsAction.java @@ -41,8 +41,8 @@ public class TransportSqlStatsAction extends TransportNodesAction Date: Fri, 28 Jun 2019 07:37:21 +0200 Subject: [PATCH 20/42] Add version and create_time to data frame analytics config (#43683) (#43712) --- .../ml/PutDataFrameAnalyticsRequest.java | 6 ++ .../dataframe/DataFrameAnalyticsConfig.java | 69 ++++++++++++-- .../DataFrameAnalyticsConfigTests.java | 8 ++ .../dataframe/DataFrameAnalyticsConfig.java | 89 +++++++++++++++++-- .../persistence/ElasticsearchMappings.java | 8 ++ .../ml/job/results/ReservedFieldNames.java | 2 + .../DataFrameAnalyticsConfigTests.java | 71 +++++++++++++-- .../ml/qa/ml-with-security/build.gradle | 2 + .../TransportPutDataFrameAnalyticsAction.java | 8 +- .../test/data_frame/transforms_crud.yml | 38 ++++++++ .../test/ml/data_frame_analytics_crud.yml | 57 ++++++++++++ 11 files changed, 335 insertions(+), 23 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutDataFrameAnalyticsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutDataFrameAnalyticsRequest.java index 14950a74c91..2624b68a983 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutDataFrameAnalyticsRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutDataFrameAnalyticsRequest.java @@ -22,6 +22,7 @@ package org.elasticsearch.client.ml; import org.elasticsearch.client.Validatable; import org.elasticsearch.client.ValidationException; import org.elasticsearch.client.ml.dataframe.DataFrameAnalyticsConfig; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -67,4 +68,9 @@ public class PutDataFrameAnalyticsRequest implements ToXContentObject, Validatab public int hashCode() { return Objects.hash(config); } + + @Override + public String toString() { + return Strings.toString(this); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfig.java index b1309e66afc..62adb062945 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfig.java @@ -19,11 +19,14 @@ package org.elasticsearch.client.ml.dataframe; +import org.elasticsearch.Version; +import org.elasticsearch.client.dataframe.transforms.util.TimeUtil; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -31,11 +34,9 
@@ import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import java.io.IOException; +import java.time.Instant; import java.util.Objects; -import static org.elasticsearch.common.xcontent.ObjectParser.ValueType.OBJECT_ARRAY_BOOLEAN_OR_STRING; -import static org.elasticsearch.common.xcontent.ObjectParser.ValueType.VALUE; - public class DataFrameAnalyticsConfig implements ToXContentObject { public static DataFrameAnalyticsConfig fromXContent(XContentParser parser) { @@ -52,6 +53,8 @@ public class DataFrameAnalyticsConfig implements ToXContentObject { private static final ParseField ANALYSIS = new ParseField("analysis"); private static final ParseField ANALYZED_FIELDS = new ParseField("analyzed_fields"); private static final ParseField MODEL_MEMORY_LIMIT = new ParseField("model_memory_limit"); + private static final ParseField CREATE_TIME = new ParseField("create_time"); + private static final ParseField VERSION = new ParseField("version"); private static ObjectParser PARSER = new ObjectParser<>("data_frame_analytics_config", true, Builder::new); @@ -63,9 +66,24 @@ public class DataFrameAnalyticsConfig implements ToXContentObject { PARSER.declareField(Builder::setAnalyzedFields, (p, c) -> FetchSourceContext.fromXContent(p), ANALYZED_FIELDS, - OBJECT_ARRAY_BOOLEAN_OR_STRING); + ValueType.OBJECT_ARRAY_BOOLEAN_OR_STRING); PARSER.declareField(Builder::setModelMemoryLimit, - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MODEL_MEMORY_LIMIT.getPreferredName()), MODEL_MEMORY_LIMIT, VALUE); + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MODEL_MEMORY_LIMIT.getPreferredName()), + MODEL_MEMORY_LIMIT, + ValueType.VALUE); + PARSER.declareField(Builder::setCreateTime, + p -> TimeUtil.parseTimeFieldToInstant(p, CREATE_TIME.getPreferredName()), + CREATE_TIME, + ValueType.VALUE); + PARSER.declareField(Builder::setVersion, + p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return Version.fromString(p.text()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, + VERSION, + ValueType.STRING); } private static DataFrameAnalysis parseAnalysis(XContentParser parser) throws IOException { @@ -82,15 +100,20 @@ public class DataFrameAnalyticsConfig implements ToXContentObject { private final DataFrameAnalysis analysis; private final FetchSourceContext analyzedFields; private final ByteSizeValue modelMemoryLimit; + private final Instant createTime; + private final Version version; private DataFrameAnalyticsConfig(String id, DataFrameAnalyticsSource source, DataFrameAnalyticsDest dest, DataFrameAnalysis analysis, - @Nullable FetchSourceContext analyzedFields, @Nullable ByteSizeValue modelMemoryLimit) { + @Nullable FetchSourceContext analyzedFields, @Nullable ByteSizeValue modelMemoryLimit, + @Nullable Instant createTime, @Nullable Version version) { this.id = Objects.requireNonNull(id); this.source = Objects.requireNonNull(source); this.dest = Objects.requireNonNull(dest); this.analysis = Objects.requireNonNull(analysis); this.analyzedFields = analyzedFields; this.modelMemoryLimit = modelMemoryLimit; + this.createTime = createTime == null ? 
null : Instant.ofEpochMilli(createTime.toEpochMilli());; + this.version = version; } public String getId() { @@ -117,6 +140,14 @@ public class DataFrameAnalyticsConfig implements ToXContentObject { return modelMemoryLimit; } + public Instant getCreateTime() { + return createTime; + } + + public Version getVersion() { + return version; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -132,6 +163,12 @@ public class DataFrameAnalyticsConfig implements ToXContentObject { if (modelMemoryLimit != null) { builder.field(MODEL_MEMORY_LIMIT.getPreferredName(), modelMemoryLimit.getStringRep()); } + if (createTime != null) { + builder.timeField(CREATE_TIME.getPreferredName(), CREATE_TIME.getPreferredName() + "_string", createTime.toEpochMilli()); + } + if (version != null) { + builder.field(VERSION.getPreferredName(), version); + } builder.endObject(); return builder; } @@ -147,12 +184,14 @@ public class DataFrameAnalyticsConfig implements ToXContentObject { && Objects.equals(dest, other.dest) && Objects.equals(analysis, other.analysis) && Objects.equals(analyzedFields, other.analyzedFields) - && Objects.equals(modelMemoryLimit, other.modelMemoryLimit); + && Objects.equals(modelMemoryLimit, other.modelMemoryLimit) + && Objects.equals(createTime, other.createTime) + && Objects.equals(version, other.version); } @Override public int hashCode() { - return Objects.hash(id, source, dest, analysis, analyzedFields, getModelMemoryLimit()); + return Objects.hash(id, source, dest, analysis, analyzedFields, modelMemoryLimit, createTime, version); } @Override @@ -168,6 +207,8 @@ public class DataFrameAnalyticsConfig implements ToXContentObject { private DataFrameAnalysis analysis; private FetchSourceContext analyzedFields; private ByteSizeValue modelMemoryLimit; + private Instant createTime; + private Version version; private Builder() {} @@ -201,8 +242,18 @@ public class DataFrameAnalyticsConfig implements ToXContentObject { return this; } + public Builder setCreateTime(Instant createTime) { + this.createTime = createTime; + return this; + } + + public Builder setVersion(Version version) { + this.version = version; + return this; + } + public DataFrameAnalyticsConfig build() { - return new DataFrameAnalyticsConfig(id, source, dest, analysis, analyzedFields, modelMemoryLimit); + return new DataFrameAnalyticsConfig(id, source, dest, analysis, analyzedFields, modelMemoryLimit, createTime, version); } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfigTests.java index 4eba6424010..fa8df5bfee9 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfigTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.client.ml.dataframe; +import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -29,6 +30,7 @@ import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.test.AbstractXContentTestCase; import java.io.IOException; +import java.time.Instant; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -54,6 
+56,12 @@ public class DataFrameAnalyticsConfigTests extends AbstractXContentTestCase STRICT_PARSER = createParser(false); public static final ObjectParser LENIENT_PARSER = createParser(true); @@ -69,6 +75,18 @@ public class DataFrameAnalyticsConfig implements ToXContentObject, Writeable { // Headers are not parsed by the strict (config) parser, so headers supplied in the _body_ of a REST request will be rejected. // (For config, headers are explicitly transferred from the auth headers by code in the put data frame actions.) parser.declareObject(Builder::setHeaders, (p, c) -> p.mapStrings(), HEADERS); + // Creation time is set automatically during PUT, so create_time supplied in the _body_ of a REST request will be rejected. + parser.declareField(Builder::setCreateTime, + p -> TimeUtils.parseTimeFieldToInstant(p, CREATE_TIME.getPreferredName()), + CREATE_TIME, + ObjectParser.ValueType.VALUE); + // Version is set automatically during PUT, so version supplied in the _body_ of a REST request will be rejected. + parser.declareField(Builder::setVersion, p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return Version.fromString(p.text()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, VERSION, ObjectParser.ValueType.STRING); } return parser; } @@ -96,10 +114,12 @@ public class DataFrameAnalyticsConfig implements ToXContentObject, Writeable { */ private final ByteSizeValue modelMemoryLimit; private final Map headers; + private final Instant createTime; + private final Version version; public DataFrameAnalyticsConfig(String id, DataFrameAnalyticsSource source, DataFrameAnalyticsDest dest, DataFrameAnalysis analysis, Map headers, ByteSizeValue modelMemoryLimit, - FetchSourceContext analyzedFields) { + FetchSourceContext analyzedFields, Instant createTime, Version version) { this.id = ExceptionsHelper.requireNonNull(id, ID); this.source = ExceptionsHelper.requireNonNull(source, SOURCE); this.dest = ExceptionsHelper.requireNonNull(dest, DEST); @@ -107,16 +127,25 @@ public class DataFrameAnalyticsConfig implements ToXContentObject, Writeable { this.analyzedFields = analyzedFields; this.modelMemoryLimit = modelMemoryLimit; this.headers = Collections.unmodifiableMap(headers); + this.createTime = createTime == null ? null : Instant.ofEpochMilli(createTime.toEpochMilli());; + this.version = version; } public DataFrameAnalyticsConfig(StreamInput in) throws IOException { - id = in.readString(); - source = new DataFrameAnalyticsSource(in); - dest = new DataFrameAnalyticsDest(in); - analysis = in.readNamedWriteable(DataFrameAnalysis.class); + this.id = in.readString(); + this.source = new DataFrameAnalyticsSource(in); + this.dest = new DataFrameAnalyticsDest(in); + this.analysis = in.readNamedWriteable(DataFrameAnalysis.class); this.analyzedFields = in.readOptionalWriteable(FetchSourceContext::new); this.modelMemoryLimit = in.readOptionalWriteable(ByteSizeValue::new); this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); + if (in.getVersion().onOrAfter(Version.V_7_3_0)) { + createTime = in.readOptionalInstant(); + version = in.readBoolean() ? 
Version.readVersion(in) : null; + } else { + createTime = null; + version = null; + } } public String getId() { @@ -147,6 +176,14 @@ public class DataFrameAnalyticsConfig implements ToXContentObject, Writeable { return headers; } + public Instant getCreateTime() { + return createTime; + } + + public Version getVersion() { + return version; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -168,6 +205,12 @@ public class DataFrameAnalyticsConfig implements ToXContentObject, Writeable { if (headers.isEmpty() == false && params.paramAsBoolean(ToXContentParams.FOR_INTERNAL_STORAGE, false)) { builder.field(HEADERS.getPreferredName(), headers); } + if (createTime != null) { + builder.timeField(CREATE_TIME.getPreferredName(), CREATE_TIME.getPreferredName() + "_string", createTime.toEpochMilli()); + } + if (version != null) { + builder.field(VERSION.getPreferredName(), version); + } builder.endObject(); return builder; } @@ -181,6 +224,15 @@ public class DataFrameAnalyticsConfig implements ToXContentObject, Writeable { out.writeOptionalWriteable(analyzedFields); out.writeOptionalWriteable(modelMemoryLimit); out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); + if (out.getVersion().onOrAfter(Version.V_7_3_0)) { + out.writeOptionalInstant(createTime); + if (version != null) { + out.writeBoolean(true); + Version.writeVersion(version, out); + } else { + out.writeBoolean(false); + } + } } @Override @@ -195,12 +247,19 @@ public class DataFrameAnalyticsConfig implements ToXContentObject, Writeable { && Objects.equals(analysis, other.analysis) && Objects.equals(headers, other.headers) && Objects.equals(getModelMemoryLimit(), other.getModelMemoryLimit()) - && Objects.equals(analyzedFields, other.analyzedFields); + && Objects.equals(analyzedFields, other.analyzedFields) + && Objects.equals(createTime, other.createTime) + && Objects.equals(version, other.version); } @Override public int hashCode() { - return Objects.hash(id, source, dest, analysis, headers, getModelMemoryLimit(), analyzedFields); + return Objects.hash(id, source, dest, analysis, headers, getModelMemoryLimit(), analyzedFields, createTime, version); + } + + @Override + public String toString() { + return Strings.toString(this); } public static String documentId(String id) { @@ -217,6 +276,8 @@ public class DataFrameAnalyticsConfig implements ToXContentObject, Writeable { private ByteSizeValue modelMemoryLimit; private ByteSizeValue maxModelMemoryLimit; private Map headers = Collections.emptyMap(); + private Instant createTime; + private Version version; public Builder() {} @@ -243,6 +304,8 @@ public class DataFrameAnalyticsConfig implements ToXContentObject, Writeable { if (config.analyzedFields != null) { this.analyzedFields = new FetchSourceContext(true, config.analyzedFields.includes(), config.analyzedFields.excludes()); } + this.createTime = config.createTime; + this.version = config.version; } public String getId() { @@ -304,9 +367,19 @@ public class DataFrameAnalyticsConfig implements ToXContentObject, Writeable { } } + public Builder setCreateTime(Instant createTime) { + this.createTime = createTime; + return this; + } + + public Builder setVersion(Version version) { + this.version = version; + return this; + } + public DataFrameAnalyticsConfig build() { applyMaxModelMemoryLimit(); - return new DataFrameAnalyticsConfig(id, source, dest, analysis, headers, modelMemoryLimit, analyzedFields); + return new 
DataFrameAnalyticsConfig(id, source, dest, analysis, headers, modelMemoryLimit, analyzedFields, createTime, version); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java index bc69f4b5d5e..75ce2d53315 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java @@ -391,6 +391,10 @@ public class ElasticsearchMappings { .endObject(); } + /** + * {@link DataFrameAnalyticsConfig} mapping. + * Does not include mapping for CREATE_TIME as this mapping is added by {@link #addJobConfigFields} method. + */ public static void addDataFrameAnalyticsFields(XContentBuilder builder) throws IOException { builder.startObject(DataFrameAnalyticsConfig.ID.getPreferredName()) .field(TYPE, KEYWORD) @@ -434,6 +438,10 @@ public class ElasticsearchMappings { .endObject() .endObject() .endObject() + .endObject() + // re-used: CREATE_TIME + .startObject(DataFrameAnalyticsConfig.VERSION.getPreferredName()) + .field(TYPE, KEYWORD) .endObject(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java index 39036abb693..eff33a37d97 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java @@ -277,6 +277,8 @@ public final class ReservedFieldNames { DataFrameAnalyticsConfig.DEST.getPreferredName(), DataFrameAnalyticsConfig.ANALYSIS.getPreferredName(), DataFrameAnalyticsConfig.ANALYZED_FIELDS.getPreferredName(), + DataFrameAnalyticsConfig.CREATE_TIME.getPreferredName(), + DataFrameAnalyticsConfig.VERSION.getPreferredName(), DataFrameAnalyticsDest.INDEX.getPreferredName(), DataFrameAnalyticsDest.RESULTS_FIELD.getPreferredName(), DataFrameAnalyticsSource.INDEX.getPreferredName(), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigTests.java index a5df1f83c3d..518950b675c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.dataframe; import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; @@ -17,6 +18,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ObjectParser; import 
org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; @@ -30,16 +32,18 @@ import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.ml.dataframe.analyses.MlDataFrameAnalysisNamedXContentProvider; import org.elasticsearch.xpack.core.ml.dataframe.analyses.OutlierDetectionTests; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; +import org.junit.Before; import java.io.IOException; +import java.time.Instant; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasSize; @@ -49,7 +53,11 @@ public class DataFrameAnalyticsConfigTests extends AbstractSerializingTestCase dataFrameAnalyticsConfigParser = + lenient + ? DataFrameAnalyticsConfig.LENIENT_PARSER + : DataFrameAnalyticsConfig.STRICT_PARSER; + return dataFrameAnalyticsConfigParser.apply(parser, null).build(); } @Override @@ -70,7 +78,7 @@ public class DataFrameAnalyticsConfigTests extends AbstractSerializingTestCase DataFrameAnalyticsConfig.STRICT_PARSER.apply(parser, null)); + assertThat(e.getMessage(), containsString("unknown field [create_time], parser not found")); + } + } + + public void testPreventVersionInjection() throws IOException { + String json = "{" + + " \"version\" : \"7.3.0\"," + + " \"source\" : {\"index\":\"src\"}," + + " \"dest\" : {\"index\": \"dest\"}," + + "}"; + + try (XContentParser parser = + XContentFactory.xContent(XContentType.JSON).createParser( + xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) { + Exception e = expectThrows(IllegalArgumentException.class, () -> DataFrameAnalyticsConfig.STRICT_PARSER.apply(parser, null)); + assertThat(e.getMessage(), containsString("unknown field [version], parser not found")); + } + } + public void assertTooSmall(IllegalArgumentException e) { assertThat(e.getMessage(), is("[model_memory_limit] must be at least [1mb]")); } diff --git a/x-pack/plugin/ml/qa/ml-with-security/build.gradle b/x-pack/plugin/ml/qa/ml-with-security/build.gradle index af500c1dd85..643d736e6b1 100644 --- a/x-pack/plugin/ml/qa/ml-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/ml-with-security/build.gradle @@ -39,6 +39,8 @@ integTest.runner { 'ml/datafeeds_crud/Test put datafeed with security headers in the body', 'ml/datafeeds_crud/Test update datafeed with missing id', 'ml/data_frame_analytics_crud/Test put config with security headers in the body', + 'ml/data_frame_analytics_crud/Test put config with create_time in the body', + 'ml/data_frame_analytics_crud/Test put config with version in the body', 'ml/data_frame_analytics_crud/Test put config with inconsistent body/param ids', 'ml/data_frame_analytics_crud/Test put config with invalid id', 'ml/data_frame_analytics_crud/Test put config with invalid dest index name', diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDataFrameAnalyticsAction.java index 0f709b4e166..d8f5dbb469f 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDataFrameAnalyticsAction.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.ml.action; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; @@ -41,6 +42,7 @@ import org.elasticsearch.xpack.ml.dataframe.SourceDestValidator; import org.elasticsearch.xpack.ml.dataframe.persistence.DataFrameAnalyticsConfigProvider; import java.io.IOException; +import java.time.Instant; import java.util.Objects; import java.util.function.Supplier; @@ -91,7 +93,10 @@ public class TransportPutDataFrameAnalyticsAction } validateConfig(request.getConfig()); DataFrameAnalyticsConfig memoryCappedConfig = - new DataFrameAnalyticsConfig.Builder(request.getConfig(), maxModelMemoryLimit).build(); + new DataFrameAnalyticsConfig.Builder(request.getConfig(), maxModelMemoryLimit) + .setCreateTime(Instant.now()) + .setVersion(Version.CURRENT) + .build(); if (licenseState.isAuthAllowed()) { final String username = securityContext.getUser().principal(); RoleDescriptor.IndicesPrivileges sourceIndexPrivileges = RoleDescriptor.IndicesPrivileges.builder() @@ -156,5 +161,6 @@ public class TransportPutDataFrameAnalyticsAction } config.getDest().validate(); new SourceDestValidator(clusterService.state(), indexNameExpressionResolver).check(config); + } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml index 307ecda231b..bfde8128b49 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml @@ -453,3 +453,41 @@ setup: "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} } } + +--- +"Test put valid config with create_time in the body": + + - do: + catch: /Found \[create_time\], not allowed for strict parsing/ + data_frame.put_data_frame_transform: + transform_id: "airline-transform-with-create-time" + body: > + { + "source": { "index": "airline-data" }, + "dest": { "index": "airline-data-by-airline" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + }, + "description": "yaml test transform on airline-data", + "create_time": 123456789 + } + +--- +"Test put valid config with version in the body": + + - do: + catch: /Found \[version\], not allowed for strict parsing/ + data_frame.put_data_frame_transform: + transform_id: "airline-transform-with-version" + body: > + { + "source": { "index": "airline-data" }, + "dest": { "index": "airline-data-by-airline" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + }, + "description": "yaml test transform on airline-data", + "version": "7.3.0" + } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_crud.yml index e5a68fb3383..01afb7714f3 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_crud.yml +++ 
b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_crud.yml @@ -55,6 +55,21 @@ setup: - match: { dest.index: "index-dest" } - match: { analysis: {"outlier_detection":{}} } - match: { analyzed_fields: {"includes" : ["obj1.*", "obj2.*" ], "excludes": [] } } + - is_true: create_time + - is_true: version + + - do: + ml.get_data_frame_analytics: + id: "simple-outlier-detection-with-query" + - match: { count: 1 } + - match: { data_frame_analytics.0.id: "simple-outlier-detection-with-query" } + - match: { data_frame_analytics.0.source.index: "index-source" } + - match: { data_frame_analytics.0.source.query: {"term" : { "user" : "Kimchy"} } } + - match: { data_frame_analytics.0.dest.index: "index-dest" } + - match: { data_frame_analytics.0.analysis: {"outlier_detection":{}} } + - match: { data_frame_analytics.0.analyzed_fields: {"includes" : ["obj1.*", "obj2.*" ], "excludes": [] } } + - is_true: data_frame_analytics.0.create_time + - is_true: data_frame_analytics.0.version --- "Test put config with security headers in the body": @@ -75,6 +90,44 @@ setup: "headers":{ "a_security_header" : "secret" } } +--- +"Test put config with create_time in the body": + + - do: + catch: /unknown field \[create_time\], parser not found/ + ml.put_data_frame_analytics: + id: "data_frame_with_create_time" + body: > + { + "source": { + "index": "index-source" + }, + "dest": { + "index": "index-dest" + }, + "analysis": {"outlier_detection":{}}, + "create_time": 123456789 + } + +--- +"Test put config with version in the body": + + - do: + catch: /unknown field \[version\], parser not found/ + ml.put_data_frame_analytics: + id: "data_frame_with_version" + body: > + { + "source": { + "index": "index-source" + }, + "dest": { + "index": "index-dest" + }, + "analysis": {"outlier_detection":{}}, + "version": "7.3.0" + } + --- "Test put valid config with default outlier detection": @@ -96,6 +149,8 @@ setup: - match: { source.query: {"match_all" : {} } } - match: { dest.index: "index-dest" } - match: { analysis: {"outlier_detection":{}} } + - is_true: create_time + - is_true: version --- "Test put valid config with custom outlier detection": @@ -126,6 +181,8 @@ setup: - match: { analysis.outlier_detection.n_neighbors: 5 } - match: { analysis.outlier_detection.method: "lof" } - match: { analysis.outlier_detection.minimum_score_to_write_feature_influence: 0.0 } + - is_true: create_time + - is_true: version --- "Test put config with inconsistent body/param ids": From 51b230f6ab7d9dee95e6413ddf2c0d0eb2c0e560 Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Fri, 28 Jun 2019 08:19:00 +0100 Subject: [PATCH 21/42] Fix PreConfiguredTokenFilters getSynonymFilter() implementations (#38839) (#43678) When we added support for TokenFilterFactories to specialise how they were used when parsing synonym files, PreConfiguredTokenFilters were set up to either apply themselves, or be ignored. This behaviour is a leftover from an earlier iteration, and also has an incorrect default. This commit makes preconfigured token filters usable in synonym file parsing by default, and brings those filters that should not be used into line with index-specific filter factories; in indexes created before version 7 we emit a deprecation warning, and we throw an error in indexes created after. 
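As a sketch of the resulting registration API, taken from the CommonAnalysisPlugin hunks below (on this reading, the extra boolean argument is the new flag saying whether the preconfigured filter may be applied while parsing synonym files):

    // two-argument form: keeps the new default and remains usable in synonym file parsing
    filters.add(PreConfiguredTokenFilter.singleton("classic", false, ClassicFilter::new));

    // three-argument form: multi-token filters such as common_grams now opt out explicitly;
    // indexes created before 7.0 get a deprecation warning, newer ones an error
    filters.add(PreConfiguredTokenFilter.singleton("common_grams", false, false,
        input -> new CommonGramsFilter(input, CharArraySet.EMPTY_SET)));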
Fixes #38793 --- .../analysis/common/CommonAnalysisPlugin.java | 16 ++--- .../common/SynonymsAnalysisTests.java | 72 ++++++++++++++++--- .../analysis/PreConfiguredTokenFilter.java | 58 +++++++++++---- 3 files changed, 115 insertions(+), 31 deletions(-) diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index ea0b69c678b..94f5de8278f 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -401,7 +401,7 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri filters.add(PreConfiguredTokenFilter.singleton("cjk_bigram", false, CJKBigramFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("cjk_width", true, CJKWidthFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("classic", false, ClassicFilter::new)); - filters.add(PreConfiguredTokenFilter.singleton("common_grams", false, + filters.add(PreConfiguredTokenFilter.singleton("common_grams", false, false, input -> new CommonGramsFilter(input, CharArraySet.EMPTY_SET))); filters.add(PreConfiguredTokenFilter.singleton("czech_stem", false, CzechStemFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("decimal_digit", true, DecimalDigitFilter::new)); @@ -422,9 +422,9 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri DelimitedPayloadTokenFilterFactory.DEFAULT_DELIMITER, DelimitedPayloadTokenFilterFactory.DEFAULT_ENCODER))); filters.add(PreConfiguredTokenFilter.singleton("dutch_stem", false, input -> new SnowballFilter(input, new DutchStemmer()))); - filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, input -> + filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, false, input -> new EdgeNGramTokenFilter(input, 1))); - filters.add(PreConfiguredTokenFilter.singletonWithVersion("edgeNGram", false, (reader, version) -> { + filters.add(PreConfiguredTokenFilter.singletonWithVersion("edgeNGram", false, false, (reader, version) -> { if (version.onOrAfter(org.elasticsearch.Version.V_7_0_0)) { throw new IllegalArgumentException( "The [edgeNGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " @@ -451,8 +451,8 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri new LimitTokenCountFilter(input, LimitTokenCountFilterFactory.DEFAULT_MAX_TOKEN_COUNT, LimitTokenCountFilterFactory.DEFAULT_CONSUME_ALL_TOKENS))); - filters.add(PreConfiguredTokenFilter.singleton("ngram", false, reader -> new NGramTokenFilter(reader, 1, 2, false))); - filters.add(PreConfiguredTokenFilter.singletonWithVersion("nGram", false, (reader, version) -> { + filters.add(PreConfiguredTokenFilter.singleton("ngram", false, false, reader -> new NGramTokenFilter(reader, 1, 2, false))); + filters.add(PreConfiguredTokenFilter.singletonWithVersion("nGram", false, false, (reader, version) -> { if (version.onOrAfter(org.elasticsearch.Version.V_7_0_0)) { throw new IllegalArgumentException("The [nGram] token filter name was deprecated in 6.4 and cannot be used in new indices. 
" + "Please change the filter name to [ngram] instead."); @@ -469,7 +469,7 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri filters.add(PreConfiguredTokenFilter.singleton("russian_stem", false, input -> new SnowballFilter(input, "Russian"))); filters.add(PreConfiguredTokenFilter.singleton("scandinavian_folding", true, ScandinavianFoldingFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("scandinavian_normalization", true, ScandinavianNormalizationFilter::new)); - filters.add(PreConfiguredTokenFilter.singleton("shingle", false, input -> { + filters.add(PreConfiguredTokenFilter.singleton("shingle", false, false, input -> { TokenStream ts = new ShingleFilter(input); /** * We disable the graph analysis on this token stream @@ -491,14 +491,14 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri filters.add(PreConfiguredTokenFilter.singleton("type_as_payload", false, TypeAsPayloadTokenFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("unique", false, UniqueTokenFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("uppercase", true, UpperCaseFilter::new)); - filters.add(PreConfiguredTokenFilter.singleton("word_delimiter", false, input -> + filters.add(PreConfiguredTokenFilter.singleton("word_delimiter", false, false, input -> new WordDelimiterFilter(input, WordDelimiterFilter.GENERATE_WORD_PARTS | WordDelimiterFilter.GENERATE_NUMBER_PARTS | WordDelimiterFilter.SPLIT_ON_CASE_CHANGE | WordDelimiterFilter.SPLIT_ON_NUMERICS | WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null))); - filters.add(PreConfiguredTokenFilter.singletonWithVersion("word_delimiter_graph", false, (input, version) -> { + filters.add(PreConfiguredTokenFilter.singletonWithVersion("word_delimiter_graph", false, false, (input, version) -> { boolean adjustOffsets = version.onOrAfter(Version.V_7_3_0); return new WordDelimiterGraphFilter(input, adjustOffsets, WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, WordDelimiterGraphFilter.GENERATE_WORD_PARTS diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java index a63dd975688..6582188f33c 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.IndexAnalyzers; +import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.index.analysis.TokenizerFactory; import org.elasticsearch.test.ESTestCase; @@ -42,8 +43,11 @@ import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; import java.util.List; +import java.util.Set; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -163,23 +167,21 @@ public class SynonymsAnalysisTests extends ESTestCase { new int[]{ 1, 0 }); } - public void testKeywordRepeatAndSynonyms() throws IOException { + public void testPreconfigured() throws IOException { Settings settings = Settings.builder() 
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put("path.home", createTempDir().toString()) .put("index.analysis.filter.synonyms.type", "synonym") - .putList("index.analysis.filter.synonyms.synonyms", "programmer, developer") - .put("index.analysis.filter.my_english.type", "stemmer") - .put("index.analysis.filter.my_english.language", "porter2") - .put("index.analysis.analyzer.synonymAnalyzer.tokenizer", "standard") - .putList("index.analysis.analyzer.synonymAnalyzer.filter", "lowercase", "keyword_repeat", "my_english", "synonyms") + .putList("index.analysis.filter.synonyms.synonyms", "würst, sausage") + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.my_analyzer.filter", "lowercase", "asciifolding", "synonyms") .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers; - BaseTokenStreamTestCase.assertAnalyzesTo(indexAnalyzers.get("synonymAnalyzer"), "programmers", - new String[]{ "programmers", "programm", "develop" }, - new int[]{ 1, 0, 0 }); + BaseTokenStreamTestCase.assertAnalyzesTo(indexAnalyzers.get("my_analyzer"), "würst", + new String[]{ "wurst", "sausage"}, + new int[]{ 1, 0 }); } public void testChainedSynonymFilters() throws IOException { @@ -248,6 +250,58 @@ public class SynonymsAnalysisTests extends ESTestCase { } + public void testPreconfiguredTokenFilters() throws IOException { + Set disallowedFilters = new HashSet<>(Arrays.asList( + "common_grams", "edge_ngram", "edgeNGram", "keyword_repeat", "ngram", "nGram", + "shingle", "word_delimiter", "word_delimiter_graph" + )); + + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, + VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.CURRENT)) + .put("path.home", createTempDir().toString()) + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + + CommonAnalysisPlugin plugin = new CommonAnalysisPlugin(); + + for (PreConfiguredTokenFilter tf : plugin.getPreConfiguredTokenFilters()) { + if (disallowedFilters.contains(tf.getName())) { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + "Expected exception for factory " + tf.getName(), () -> { + tf.get(idxSettings, null, tf.getName(), settings).getSynonymFilter(); + }); + assertEquals(tf.getName(), "Token filter [" + tf.getName() + + "] cannot be used to parse synonyms", + e.getMessage()); + } + else { + tf.get(idxSettings, null, tf.getName(), settings).getSynonymFilter(); + } + } + + Settings settings2 = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, + VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_0_0))) + .put("path.home", createTempDir().toString()) + .putList("common_words", "a", "b") + .put("output_unigrams", "true") + .build(); + IndexSettings idxSettings2 = IndexSettingsModule.newIndexSettings("index", settings2); + + List expectedWarnings = new ArrayList<>(); + for (PreConfiguredTokenFilter tf : plugin.getPreConfiguredTokenFilters()) { + if (disallowedFilters.contains(tf.getName())) { + tf.get(idxSettings2, null, tf.getName(), settings2).getSynonymFilter(); + expectedWarnings.add("Token filter [" + tf.getName() + "] will not be usable to parse synonyms after v7.0"); + } + else { + tf.get(idxSettings2, null, tf.getName(), settings2).getSynonymFilter(); + } + } + 
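+            // Each disallowed filter above returned itself (pre-7.0 leniency) while
+            // registering a deprecation warning; the collected warnings are asserted below.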
assertWarnings(expectedWarnings.toArray(new String[0])); + } + public void testDisallowedTokenFilters() throws IOException { Settings settings = Settings.builder() diff --git a/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilter.java b/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilter.java index 123802c9510..5776edd69fc 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilter.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilter.java @@ -19,9 +19,11 @@ package org.elasticsearch.index.analysis; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; import org.elasticsearch.Version; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.indices.analysis.PreBuiltCacheFactory; import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy; @@ -32,12 +34,16 @@ import java.util.function.Function; * Provides pre-configured, shared {@link TokenFilter}s. */ public final class PreConfiguredTokenFilter extends PreConfiguredAnalysisComponent { + + private static final DeprecationLogger DEPRECATION_LOGGER + = new DeprecationLogger(LogManager.getLogger(PreConfiguredTokenFilter.class)); + /** * Create a pre-configured token filter that may not vary at all. */ public static PreConfiguredTokenFilter singleton(String name, boolean useFilterForMultitermQueries, Function create) { - return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, false, CachingStrategy.ONE, + return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, true, CachingStrategy.ONE, (tokenStream, version) -> create.apply(tokenStream)); } @@ -45,27 +51,37 @@ public final class PreConfiguredTokenFilter extends PreConfiguredAnalysisCompone * Create a pre-configured token filter that may not vary at all. */ public static PreConfiguredTokenFilter singleton(String name, boolean useFilterForMultitermQueries, - boolean useFilterForParsingSynonyms, + boolean allowForSynonymParsing, Function create) { - return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, useFilterForParsingSynonyms, CachingStrategy.ONE, + return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, allowForSynonymParsing, CachingStrategy.ONE, (tokenStream, version) -> create.apply(tokenStream)); } /** - * Create a pre-configured token filter that may not vary at all. + * Create a pre-configured token filter that may vary based on the Elasticsearch version. */ public static PreConfiguredTokenFilter singletonWithVersion(String name, boolean useFilterForMultitermQueries, BiFunction create) { - return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, false, CachingStrategy.ONE, + return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, true, CachingStrategy.ONE, (tokenStream, version) -> create.apply(tokenStream, version)); } + /** + * Create a pre-configured token filter that may vary based on the Elasticsearch version. 
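+     * The additional {@code useFilterForParsingSynonyms} flag controls whether the
+     * returned factory may be used to parse synonym files; factories created through
+     * the overloads without the flag now default to being allowed.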
+ */ + public static PreConfiguredTokenFilter singletonWithVersion(String name, boolean useFilterForMultitermQueries, + boolean useFilterForParsingSynonyms, + BiFunction create) { + return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, useFilterForParsingSynonyms, CachingStrategy.ONE, + (tokenStream, version) -> create.apply(tokenStream, version)); + } + /** * Create a pre-configured token filter that may vary based on the Lucene version. */ public static PreConfiguredTokenFilter luceneVersion(String name, boolean useFilterForMultitermQueries, BiFunction create) { - return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, false, CachingStrategy.LUCENE, + return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, true, CachingStrategy.LUCENE, (tokenStream, version) -> create.apply(tokenStream, version.luceneVersion)); } @@ -74,18 +90,18 @@ public final class PreConfiguredTokenFilter extends PreConfiguredAnalysisCompone */ public static PreConfiguredTokenFilter elasticsearchVersion(String name, boolean useFilterForMultitermQueries, BiFunction create) { - return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, false, CachingStrategy.ELASTICSEARCH, create); + return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, true, CachingStrategy.ELASTICSEARCH, create); } private final boolean useFilterForMultitermQueries; - private final boolean useFilterForParsingSynonyms; + private final boolean allowForSynonymParsing; private final BiFunction create; - private PreConfiguredTokenFilter(String name, boolean useFilterForMultitermQueries, boolean useFilterForParsingSynonyms, + private PreConfiguredTokenFilter(String name, boolean useFilterForMultitermQueries, boolean allowForSynonymParsing, PreBuiltCacheFactory.CachingStrategy cache, BiFunction create) { super(name, cache); this.useFilterForMultitermQueries = useFilterForMultitermQueries; - this.useFilterForParsingSynonyms = useFilterForParsingSynonyms; + this.allowForSynonymParsing = allowForSynonymParsing; this.create = create; } @@ -118,10 +134,17 @@ public final class PreConfiguredTokenFilter extends PreConfiguredAnalysisCompone @Override public TokenFilterFactory getSynonymFilter() { - if (useFilterForParsingSynonyms) { + if (allowForSynonymParsing) { + return this; + } + if (version.onOrAfter(Version.V_7_0_0)) { + throw new IllegalArgumentException("Token filter [" + name() + "] cannot be used to parse synonyms"); + } + else { + DEPRECATION_LOGGER.deprecatedAndMaybeLog(name(), "Token filter [" + name() + + "] will not be usable to parse synonyms after v7.0"); return this; } - return IDENTITY_FILTER; } }; } @@ -138,10 +161,17 @@ public final class PreConfiguredTokenFilter extends PreConfiguredAnalysisCompone @Override public TokenFilterFactory getSynonymFilter() { - if (useFilterForParsingSynonyms) { + if (allowForSynonymParsing) { + return this; + } + if (version.onOrAfter(Version.V_7_0_0)) { + throw new IllegalArgumentException("Token filter [" + name() + "] cannot be used to parse synonyms"); + } + else { + DEPRECATION_LOGGER.deprecatedAndMaybeLog(name(), "Token filter [" + name() + + "] will not be usable to parse synonyms after v7.0"); return this; } - return IDENTITY_FILTER; } }; } From 2cc7f5a744844addb22336fe2e6e3642ae422e4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Fri, 28 Jun 2019 09:55:40 +0200 Subject: [PATCH 22/42] Allow reloading of search time analyzers (#43313) Currently changing resources (like dictionaries, synonym files 
etc...) of search time analyzers is only possible by closing an index, changing the underlying resource (e.g. synonym files) and then re-opening the index for the change to take effect. This PR adds a new API endpoint that allows triggering reloading of certain analysis resources (currently token filters) that will then pick up changes in underlying file resources. To achieve this we introduce a new type of custom analyzer (ReloadableCustomAnalyzer) that uses a ReuseStrategy that allows swapping out analysis components. Custom analyzers that contain filters that are marked as "updateable" will automatically choose this implementation. This PR also adds this capability to `synonym` token filters for use in search time analyzers. Relates to #29051 --- .../client/RestHighLevelClientTests.java | 4 +- .../tokenfilters/synonym-tokenfilter.asciidoc | 2 + .../indices/apis/reload-analyzers.asciidoc | 72 ++++++++ docs/reference/rest-api/index.asciidoc | 2 + .../common/SynonymTokenFilterFactory.java | 13 ++ .../analyze/TransportAnalyzeAction.java | 46 ++--- .../client/IndicesAdminClient.java | 1 + .../org/elasticsearch/client/Requests.java | 1 - .../index/analysis/AnalysisRegistry.java | 9 +- .../index/analysis/AnalyzerComponents.java | 111 ++++++++++++ .../analysis/AnalyzerComponentsProvider.java | 29 +++ .../index/analysis/CustomAnalyzer.java | 51 +++--- .../analysis/CustomAnalyzerProvider.java | 72 +++----- .../index/analysis/IndexAnalyzers.java | 7 + .../index/analysis/NamedAnalyzer.java | 4 +- .../analysis/ReloadableCustomAnalyzer.java | 162 +++++++++++++++++ .../index/mapper/FieldTypeLookup.java | 1 - .../index/mapper/MapperService.java | 22 +++ .../highlight/FragmentBuilderHelper.java | 7 +- .../phrase/PhraseSuggestionBuilder.java | 7 +- .../clear/ClearIndicesCacheResponseTests.java | 2 +- .../indices/flush/FlushResponseTests.java | 2 +- .../forcemerge/ForceMergeResponseTests.java | 2 +- .../indices/refresh/RefreshResponseTests.java | 2 +- .../query/ValidateQueryResponseTests.java | 2 +- .../ReloadableCustomAnalyzerTests.java | 168 ++++++++++++++++++ .../index/mapper/MapperServiceTests.java | 109 +++++++++++- .../AbstractBroadcastResponseTestCase.java | 3 +- .../elasticsearch/xpack/core/XPackPlugin.java | 5 + .../core/action/ReloadAnalyzerAction.java | 23 +++ .../core/action/ReloadAnalyzersRequest.java | 43 +++++ .../core/action/ReloadAnalyzersResponse.java | 89 ++++++++++ .../TransportReloadAnalyzersAction.java | 155 ++++++++++++++++ .../action/RestReloadAnalyzersAction.java | 40 +++++ .../action/ReloadAnalyzersResponseTests.java | 51 ++++++ .../action/ReloadSynonymAnalyzerTests.java | 102 +++++++++++ .../rest/action/ReloadSynonymAnalyzerIT.java | 120 +++++++++++++ .../api/indices.reload_search_analyzers.json | 33 ++++ 38 files changed, 1454 insertions(+), 120 deletions(-) create mode 100644 docs/reference/indices/apis/reload-analyzers.asciidoc create mode 100644 server/src/main/java/org/elasticsearch/index/analysis/AnalyzerComponents.java create mode 100644 server/src/main/java/org/elasticsearch/index/analysis/AnalyzerComponentsProvider.java create mode 100644 server/src/main/java/org/elasticsearch/index/analysis/ReloadableCustomAnalyzer.java create mode 100644 server/src/test/java/org/elasticsearch/index/analysis/ReloadableCustomAnalyzerTests.java rename {server/src/test/java/org/elasticsearch/action/support/broadcast => test/framework/src/main/java/org/elasticsearch/test}/AbstractBroadcastResponseTestCase.java (98%) create mode 100644 
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzerAction.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersRequest.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersResponse.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportReloadAnalyzersAction.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestReloadAnalyzersAction.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersResponseTests.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/ReloadSynonymAnalyzerTests.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rest/action/ReloadSynonymAnalyzerIT.java create mode 100644 x-pack/plugin/src/test/resources/rest-api-spec/api/indices.reload_search_analyzers.json diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index 26e58420196..650bf2e4403 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -730,8 +730,8 @@ public class RestHighLevelClientTests extends ESTestCase { "indices.exists_type", "indices.get_upgrade", "indices.put_alias", - "scripts_painless_execute", - "render_search_template" + "render_search_template", + "scripts_painless_execute" }; //These API are not required for high-level client feature completeness String[] notRequiredApi = new String[] { diff --git a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc index 139f7c3ab0a..f47e97d27ea 100644 --- a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc @@ -43,6 +43,8 @@ Additional settings are: * `expand` (defaults to `true`). * `lenient` (defaults to `false`). If `true` ignores exceptions while parsing the synonym configuration. It is important to note that only those synonym rules which cannot get parsed are ignored. For instance consider the following request: + + [source,js] -------------------------------------------------- diff --git a/docs/reference/indices/apis/reload-analyzers.asciidoc b/docs/reference/indices/apis/reload-analyzers.asciidoc new file mode 100644 index 00000000000..f3365968df3 --- /dev/null +++ b/docs/reference/indices/apis/reload-analyzers.asciidoc @@ -0,0 +1,72 @@ +[role="xpack"] +[testenv="basic"] +[[indices-reload-analyzers]] +== Reload Search Analyzers + +experimental[] + +Reloads search analyzers and their resources. 
+ +Synonym filters (both `synonym` and `synonym_graph`) can be declared as +updateable if they are only used in <> +with the `updateable` flag: + +[source,js] +-------------------------------------------------- +PUT /my_index +{ + "settings": { + "index" : { + "analysis" : { + "analyzer" : { + "my_synonyms" : { + "tokenizer" : "whitespace", + "filter" : ["synonym"] + } + }, + "filter" : { + "synonym" : { + "type" : "synonym", + "synonyms_path" : "analysis/synonym.txt", + "updateable" : true <1> + } + } + } + } + }, + "mappings": { + "properties": { + "text": { + "type": "text", + "analyzer" : "standard", + "search_analyzer": "my_synonyms" <2> + } + } + } +} +-------------------------------------------------- +// CONSOLE + +<1> Mark the synonym filter as updateable. +<2> The synonym analyzer is usable as a search_analyzer. + +NOTE: Trying to use the above analyzer as an index analyzer will result in an error. + +Using the <>, you can trigger reloading of the +synonym definition. The contents of the configured synonyms file will be reloaded and the +synonym definitions the filter uses will be updated. + +The `_reload_search_analyzers` API can be run on one or more indices and will trigger +reloading of the synonyms from the configured file. + +NOTE: Reloading will happen on every node the index has shards on, so it's important +to update the synonym file contents on every data node (even the ones that don't currently +hold shard copies; shards might be relocated there in the future) before calling +reload to ensure the new state of the file is reflected everywhere in the cluster. + +[source,js] +-------------------------------------------------- +POST /my_index/_reload_search_analyzers +-------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT my_index\n/] \ No newline at end of file diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index dc73ac134d5..9f70c2a3cef 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -15,6 +15,7 @@ not be included yet. 
* <> * <> * <>, <> +* <> * <> * <> * <> @@ -38,4 +39,5 @@ include::{es-repo-dir}/rollup/rollup-api.asciidoc[] include::{xes-repo-dir}/rest-api/security.asciidoc[] include::{es-repo-dir}/indices/apis/unfreeze.asciidoc[] include::{xes-repo-dir}/rest-api/watcher.asciidoc[] +include::{es-repo-dir}/indices/apis/reload-analyzers.asciidoc[] include::defs.asciidoc[] diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymTokenFilterFactory.java index 75d4eca4254..5d6135549b8 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymTokenFilterFactory.java @@ -30,6 +30,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; import org.elasticsearch.index.analysis.Analysis; +import org.elasticsearch.index.analysis.AnalysisMode; import org.elasticsearch.index.analysis.CharFilterFactory; import org.elasticsearch.index.analysis.CustomAnalyzer; import org.elasticsearch.index.analysis.TokenFilterFactory; @@ -50,6 +51,7 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { private final boolean lenient; protected final Settings settings; protected final Environment environment; + private final boolean updateable; SynonymTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { @@ -65,9 +67,15 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { this.expand = settings.getAsBoolean("expand", true); this.lenient = settings.getAsBoolean("lenient", false); this.format = settings.get("format", ""); + this.updateable = settings.getAsBoolean("updateable", false); this.environment = env; } + @Override + public AnalysisMode getAnalysisMode() { + return this.updateable ? AnalysisMode.SEARCH_TIME : AnalysisMode.ALL; + } + @Override public TokenStream create(TokenStream tokenStream) { throw new IllegalStateException("Call createPerAnalyzerSynonymFactory to specialize this factory for an analysis chain first"); @@ -98,6 +106,11 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { // which doesn't support stacked input tokens return IDENTITY_FILTER; } + + @Override + public AnalysisMode getAnalysisMode() { + return updateable ? 
AnalysisMode.SEARCH_TIME : AnalysisMode.ALL; + } }; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java index 9a48592b5b8..773852860c6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -42,8 +42,9 @@ import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AnalysisRegistry; +import org.elasticsearch.index.analysis.AnalyzerComponents; +import org.elasticsearch.index.analysis.AnalyzerComponentsProvider; import org.elasticsearch.index.analysis.CharFilterFactory; -import org.elasticsearch.index.analysis.CustomAnalyzer; import org.elasticsearch.index.analysis.NameOrDefinition; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.TokenFilterFactory; @@ -261,18 +262,23 @@ public class TransportAnalyzeAction extends TransportSingleShardAction includeAttributes) { + private void analyze(TokenStream stream, Set includeAttributes, int positionIncrementGap, int offsetGap) { try { stream.reset(); CharTermAttribute term = stream.addAttribute(CharTermAttribute.class); @@ -437,8 +443,8 @@ public class TransportAnalyzeAction extends TransportSingleShardAction listener); + } diff --git a/server/src/main/java/org/elasticsearch/client/Requests.java b/server/src/main/java/org/elasticsearch/client/Requests.java index 19ad2fb397e..dfb011a5a12 100644 --- a/server/src/main/java/org/elasticsearch/client/Requests.java +++ b/server/src/main/java/org/elasticsearch/client/Requests.java @@ -534,5 +534,4 @@ public class Requests { public static SnapshotsStatusRequest snapshotsStatusRequest(String repository) { return new SnapshotsStatusRequest(repository); } - } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index 236731f6899..b8954a16e8d 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -527,7 +527,6 @@ public final class AnalysisRegistry implements Closeable { Map tokenizerFactoryFactories, Map charFilterFactoryFactories, Map tokenFilterFactoryFactories) { - Map analyzers = new HashMap<>(); Map normalizers = new HashMap<>(); Map whitespaceNormalizers = new HashMap<>(); @@ -569,9 +568,11 @@ public final class AnalysisRegistry implements Closeable { return new IndexAnalyzers(analyzers, normalizers, whitespaceNormalizers); } - private static NamedAnalyzer produceAnalyzer(String name, AnalyzerProvider analyzerFactory, - Map tokenFilters, Map charFilters, - Map tokenizers) { + private static NamedAnalyzer produceAnalyzer(String name, + AnalyzerProvider analyzerFactory, + Map tokenFilters, + Map charFilters, + Map tokenizers) { /* * Lucene defaults positionIncrementGap to 0 in all analyzers but * Elasticsearch defaults them to 0 only before version 2.0 diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalyzerComponents.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalyzerComponents.java new file mode 100644 index 00000000000..f150ac54558 --- /dev/null +++ 
b/server/src/main/java/org/elasticsearch/index/analysis/AnalyzerComponents.java @@ -0,0 +1,111 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import org.elasticsearch.common.settings.Settings; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +/** + * A class that groups analysis components necessary to produce a custom analyzer. + * See {@link ReloadableCustomAnalyzer} for an example usage. + */ +public final class AnalyzerComponents { + private final String tokenizerName; + private final TokenizerFactory tokenizerFactory; + private final CharFilterFactory[] charFilters; + private final TokenFilterFactory[] tokenFilters; + private final AnalysisMode analysisMode; + + AnalyzerComponents(String tokenizerName, TokenizerFactory tokenizerFactory, CharFilterFactory[] charFilters, + TokenFilterFactory[] tokenFilters) { + this.tokenizerName = tokenizerName; + this.tokenizerFactory = tokenizerFactory; + this.charFilters = charFilters; + this.tokenFilters = tokenFilters; + AnalysisMode mode = AnalysisMode.ALL; + for (TokenFilterFactory f : tokenFilters) { + mode = mode.merge(f.getAnalysisMode()); + } + this.analysisMode = mode; + } + + static AnalyzerComponents createComponents(String name, Settings analyzerSettings, final Map tokenizers, + final Map charFilters, final Map tokenFilters) { + String tokenizerName = analyzerSettings.get("tokenizer"); + if (tokenizerName == null) { + throw new IllegalArgumentException("Custom Analyzer [" + name + "] must be configured with a tokenizer"); + } + + TokenizerFactory tokenizer = tokenizers.get(tokenizerName); + if (tokenizer == null) { + throw new IllegalArgumentException( + "Custom Analyzer [" + name + "] failed to find tokenizer under name " + "[" + tokenizerName + "]"); + } + + List charFilterNames = analyzerSettings.getAsList("char_filter"); + List charFiltersList = new ArrayList<>(charFilterNames.size()); + for (String charFilterName : charFilterNames) { + CharFilterFactory charFilter = charFilters.get(charFilterName); + if (charFilter == null) { + throw new IllegalArgumentException( + "Custom Analyzer [" + name + "] failed to find char_filter under name " + "[" + charFilterName + "]"); + } + charFiltersList.add(charFilter); + } + + List tokenFilterNames = analyzerSettings.getAsList("filter"); + List tokenFilterList = new ArrayList<>(tokenFilterNames.size()); + for (String tokenFilterName : tokenFilterNames) { + TokenFilterFactory tokenFilter = tokenFilters.get(tokenFilterName); + if (tokenFilter == null) { + throw new IllegalArgumentException( + "Custom Analyzer [" + name + "] failed to find filter under name " + "[" + tokenFilterName + "]"); + } + tokenFilter = tokenFilter.getChainAwareTokenFilterFactory(tokenizer, 
charFiltersList, tokenFilterList, tokenFilters::get); + tokenFilterList.add(tokenFilter); + } + + return new AnalyzerComponents(tokenizerName, tokenizer, charFiltersList.toArray(new CharFilterFactory[charFiltersList.size()]), + tokenFilterList.toArray(new TokenFilterFactory[tokenFilterList.size()])); + } + + public String getTokenizerName() { + return tokenizerName; + } + + public TokenizerFactory getTokenizerFactory() { + return tokenizerFactory; + } + + public TokenFilterFactory[] getTokenFilters() { + return tokenFilters; + } + + public CharFilterFactory[] getCharFilters() { + return charFilters; + } + + public AnalysisMode analysisMode() { + return this.analysisMode; + } +} \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalyzerComponentsProvider.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalyzerComponentsProvider.java new file mode 100644 index 00000000000..84a3a14038f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalyzerComponentsProvider.java @@ -0,0 +1,29 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +/** + * Analyzers that provide access to their token filters should implement this + */ +public interface AnalyzerComponentsProvider { + + AnalyzerComponents getComponents(); + +} diff --git a/server/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzer.java b/server/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzer.java index a41ee335644..685dd2a7de0 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzer.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzer.java @@ -25,15 +25,9 @@ import org.apache.lucene.analysis.Tokenizer; import java.io.Reader; -public final class CustomAnalyzer extends Analyzer { - - private final String tokenizerName; - private final TokenizerFactory tokenizerFactory; - - private final CharFilterFactory[] charFilters; - - private final TokenFilterFactory[] tokenFilters; +public final class CustomAnalyzer extends Analyzer implements AnalyzerComponentsProvider { + private final AnalyzerComponents components; private final int positionIncrementGap; private final int offsetGap; private final AnalysisMode analysisMode; @@ -45,10 +39,7 @@ public final class CustomAnalyzer extends Analyzer { public CustomAnalyzer(String tokenizerName, TokenizerFactory tokenizerFactory, CharFilterFactory[] charFilters, TokenFilterFactory[] tokenFilters, int positionIncrementGap, int offsetGap) { - this.tokenizerName = tokenizerName; - this.tokenizerFactory = tokenizerFactory; - this.charFilters = charFilters; - this.tokenFilters = tokenFilters; + this.components = new AnalyzerComponents(tokenizerName, tokenizerFactory, charFilters, tokenFilters); this.positionIncrementGap = positionIncrementGap; this.offsetGap = offsetGap; // merge and transfer token filter analysis modes with analyzer @@ -63,19 +54,19 @@ public final class CustomAnalyzer extends Analyzer { * The name of the tokenizer as configured by the user. 
*/ public String getTokenizerName() { - return tokenizerName; + return this.components.getTokenizerName(); } public TokenizerFactory tokenizerFactory() { - return tokenizerFactory; + return this.components.getTokenizerFactory(); } public TokenFilterFactory[] tokenFilters() { - return tokenFilters; + return this.components.getTokenFilters(); } public CharFilterFactory[] charFilters() { - return charFilters; + return this.components.getCharFilters(); } @Override @@ -95,11 +86,16 @@ public final class CustomAnalyzer extends Analyzer { return this.analysisMode; } + @Override + public AnalyzerComponents getComponents() { + return this.components; + } + @Override protected TokenStreamComponents createComponents(String fieldName) { - Tokenizer tokenizer = tokenizerFactory.create(); + Tokenizer tokenizer = this.tokenizerFactory().create(); TokenStream tokenStream = tokenizer; - for (TokenFilterFactory tokenFilter : tokenFilters) { + for (TokenFilterFactory tokenFilter : tokenFilters()) { tokenStream = tokenFilter.create(tokenStream); } return new TokenStreamComponents(tokenizer, tokenStream); @@ -107,6 +103,7 @@ public final class CustomAnalyzer extends Analyzer { @Override protected Reader initReader(String fieldName, Reader reader) { + CharFilterFactory[] charFilters = charFilters(); if (charFilters != null && charFilters.length > 0) { for (CharFilterFactory charFilter : charFilters) { reader = charFilter.create(reader); @@ -117,18 +114,18 @@ public final class CustomAnalyzer extends Analyzer { @Override protected Reader initReaderForNormalization(String fieldName, Reader reader) { - for (CharFilterFactory charFilter : charFilters) { - reader = charFilter.normalize(reader); - } - return reader; + for (CharFilterFactory charFilter : charFilters()) { + reader = charFilter.normalize(reader); + } + return reader; } @Override protected TokenStream normalize(String fieldName, TokenStream in) { - TokenStream result = in; - for (TokenFilterFactory filter : tokenFilters) { - result = filter.normalize(result); - } - return result; + TokenStream result = in; + for (TokenFilterFactory filter : tokenFilters()) { + result = filter.normalize(result); + } + return result; } } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java b/server/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java index 8080a6af876..d8a50838e9d 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java @@ -19,23 +19,24 @@ package org.elasticsearch.index.analysis; +import org.apache.lucene.analysis.Analyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.TextFieldMapper; -import java.util.ArrayList; -import java.util.List; import java.util.Map; +import static org.elasticsearch.index.analysis.AnalyzerComponents.createComponents; + /** * A custom analyzer that is built out of a single {@link org.apache.lucene.analysis.Tokenizer} and a list * of {@link org.apache.lucene.analysis.TokenFilter}s. 
*/ -public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider { +public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final Settings analyzerSettings; - private CustomAnalyzer customAnalyzer; + private Analyzer customAnalyzer; public CustomAnalyzerProvider(IndexSettings indexSettings, String name, Settings settings) { @@ -43,58 +44,33 @@ public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider tokenizers, final Map charFilters, - final Map tokenFilters) { - String tokenizerName = analyzerSettings.get("tokenizer"); - if (tokenizerName == null) { - throw new IllegalArgumentException("Custom Analyzer [" + name() + "] must be configured with a tokenizer"); - } - - TokenizerFactory tokenizer = tokenizers.get(tokenizerName); - if (tokenizer == null) { - throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find tokenizer under name " + - "[" + tokenizerName + "]"); - } - - List charFilterNames = analyzerSettings.getAsList("char_filter"); - List charFiltersList = new ArrayList<>(charFilterNames.size()); - for (String charFilterName : charFilterNames) { - CharFilterFactory charFilter = charFilters.get(charFilterName); - if (charFilter == null) { - throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find char_filter under name " + - "[" + charFilterName + "]"); - } - charFiltersList.add(charFilter); - } + void build(final Map tokenizers, + final Map charFilters, + final Map tokenFilters) { + customAnalyzer = create(name(), analyzerSettings, tokenizers, charFilters, tokenFilters); + } + /** + * Factory method that either returns a plain {@link CustomAnalyzer} if the components used for creation support index + and search time use, or a {@link ReloadableCustomAnalyzer} if the components are intended for search time use only. 
+ */ + private static Analyzer create(String name, Settings analyzerSettings, Map tokenizers, + Map charFilters, + Map tokenFilters) { int positionIncrementGap = TextFieldMapper.Defaults.POSITION_INCREMENT_GAP; - positionIncrementGap = analyzerSettings.getAsInt("position_increment_gap", positionIncrementGap); - int offsetGap = analyzerSettings.getAsInt("offset_gap", -1); - - List tokenFilterNames = analyzerSettings.getAsList("filter"); - List tokenFilterList = new ArrayList<>(tokenFilterNames.size()); - for (String tokenFilterName : tokenFilterNames) { - TokenFilterFactory tokenFilter = tokenFilters.get(tokenFilterName); - if (tokenFilter == null) { - throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find filter under name " + - "[" + tokenFilterName + "]"); - } - tokenFilter = tokenFilter.getChainAwareTokenFilterFactory(tokenizer, charFiltersList, tokenFilterList, tokenFilters::get); - tokenFilterList.add(tokenFilter); + AnalyzerComponents components = createComponents(name, analyzerSettings, tokenizers, charFilters, tokenFilters); + if (components.analysisMode().equals(AnalysisMode.SEARCH_TIME)) { + return new ReloadableCustomAnalyzer(components, positionIncrementGap, offsetGap); + } else { + return new CustomAnalyzer(components.getTokenizerName(), components.getTokenizerFactory(), components.getCharFilters(), + components.getTokenFilters(), positionIncrementGap, offsetGap); } - - this.customAnalyzer = new CustomAnalyzer(tokenizerName, tokenizer, - charFiltersList.toArray(new CharFilterFactory[charFiltersList.size()]), - tokenFilterList.toArray(new TokenFilterFactory[tokenFilterList.size()]), - positionIncrementGap, - offsetGap - ); } @Override - public CustomAnalyzer get() { + public Analyzer get() { return this.customAnalyzer; } } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java b/server/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java index 900a6560a66..be77df42cb9 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java @@ -62,6 +62,13 @@ public final class IndexAnalyzers implements Closeable { return analyzers.get(name); } + /** + * Returns an (unmodifiable) map containing the index analyzers + */ + public Map getAnalyzers() { + return analyzers; + } + /** * Returns a normalizer mapped to the given name or null if not present */ diff --git a/server/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java b/server/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java index 4831d88f3aa..0c53cc323d6 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java @@ -112,8 +112,8 @@ public class NamedAnalyzer extends DelegatingAnalyzerWrapper { return; // everything allowed if this analyzer is in ALL mode } if (this.getAnalysisMode() != mode) { - if (analyzer instanceof CustomAnalyzer) { - TokenFilterFactory[] tokenFilters = ((CustomAnalyzer) analyzer).tokenFilters(); + if (analyzer instanceof AnalyzerComponentsProvider) { + TokenFilterFactory[] tokenFilters = ((AnalyzerComponentsProvider) analyzer).getComponents().getTokenFilters(); + List offendingFilters = new ArrayList<>(); for (TokenFilterFactory tokenFilter : tokenFilters) { if (tokenFilter.getAnalysisMode() != mode) { diff --git a/server/src/main/java/org/elasticsearch/index/analysis/ReloadableCustomAnalyzer.java 
b/server/src/main/java/org/elasticsearch/index/analysis/ReloadableCustomAnalyzer.java new file mode 100644 index 00000000000..7d3b8532cae --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/analysis/ReloadableCustomAnalyzer.java @@ -0,0 +1,162 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.util.CloseableThreadLocal; +import org.elasticsearch.common.settings.Settings; + +import java.io.Reader; +import java.util.Map; + +public final class ReloadableCustomAnalyzer extends Analyzer implements AnalyzerComponentsProvider { + + private volatile AnalyzerComponents components; + + private CloseableThreadLocal storedComponents = new CloseableThreadLocal<>(); + + private final int positionIncrementGap; + + private final int offsetGap; + + /** + * An alternative {@link ReuseStrategy} that allows swapping the stored analyzer components when they change. + * This is used to change e.g. token filters in search time analyzers. 
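+     * Cached per-thread {@link TokenStreamComponents} are only reused while the components
+     * they were built from are still current; after a reload, the next request discards the
+     * cache and rebuilds the stream from the swapped-in factories.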
+ */ + private static final ReuseStrategy UPDATE_STRATEGY = new ReuseStrategy() { + @Override + public TokenStreamComponents getReusableComponents(Analyzer analyzer, String fieldName) { + ReloadableCustomAnalyzer custom = (ReloadableCustomAnalyzer) analyzer; + AnalyzerComponents components = custom.getComponents(); + AnalyzerComponents storedComponents = custom.getStoredComponents(); + if (storedComponents == null || components != storedComponents) { + custom.setStoredComponents(components); + return null; + } + TokenStreamComponents tokenStream = (TokenStreamComponents) getStoredValue(analyzer); + assert tokenStream != null; + return tokenStream; + } + + @Override + public void setReusableComponents(Analyzer analyzer, String fieldName, TokenStreamComponents tokenStream) { + setStoredValue(analyzer, tokenStream); + } + }; + + ReloadableCustomAnalyzer(AnalyzerComponents components, int positionIncrementGap, int offsetGap) { + super(UPDATE_STRATEGY); + if (components.analysisMode().equals(AnalysisMode.SEARCH_TIME) == false) { + throw new IllegalArgumentException( + "ReloadableCustomAnalyzer must only be initialized with analysis components in AnalysisMode.SEARCH_TIME mode"); + } + this.components = components; + this.positionIncrementGap = positionIncrementGap; + this.offsetGap = offsetGap; + } + + @Override + public AnalyzerComponents getComponents() { + return this.components; + } + + @Override + public int getPositionIncrementGap(String fieldName) { + return this.positionIncrementGap; + } + + @Override + public int getOffsetGap(String field) { + if (this.offsetGap < 0) { + return super.getOffsetGap(field); + } + return this.offsetGap; + } + + public AnalysisMode getAnalysisMode() { + return this.components.analysisMode(); + } + + @Override + protected Reader initReaderForNormalization(String fieldName, Reader reader) { + final AnalyzerComponents components = getComponents(); + for (CharFilterFactory charFilter : components.getCharFilters()) { + reader = charFilter.normalize(reader); + } + return reader; + } + + @Override + protected TokenStream normalize(String fieldName, TokenStream in) { + final AnalyzerComponents components = getComponents(); + TokenStream result = in; + for (TokenFilterFactory filter : components.getTokenFilters()) { + result = filter.normalize(result); + } + return result; + } + + public synchronized void reload(String name, + Settings settings, + final Map tokenizers, + final Map charFilters, + final Map tokenFilters) { + AnalyzerComponents components = AnalyzerComponents.createComponents(name, settings, tokenizers, charFilters, tokenFilters); + this.components = components; + } + + @Override + public void close() { + super.close(); + storedComponents.close(); + } + + private void setStoredComponents(AnalyzerComponents components) { + storedComponents.set(components); + } + + private AnalyzerComponents getStoredComponents() { + return storedComponents.get(); + } + + @Override + protected TokenStreamComponents createComponents(String fieldName) { + final AnalyzerComponents components = getStoredComponents(); + Tokenizer tokenizer = components.getTokenizerFactory().create(); + TokenStream tokenStream = tokenizer; + for (TokenFilterFactory tokenFilter : components.getTokenFilters()) { + tokenStream = tokenFilter.create(tokenStream); + } + return new TokenStreamComponents(tokenizer, tokenStream); + } + + @Override + protected Reader initReader(String fieldName, Reader reader) { + final AnalyzerComponents components = getStoredComponents(); + if 
(components.getCharFilters() != null && components.getCharFilters().length > 0) { + for (CharFilterFactory charFilter : components.getCharFilters()) { + reader = charFilter.create(reader); + } + } + return reader; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java index ab8b5c612fe..cc2bbd65c4d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java @@ -82,7 +82,6 @@ class FieldTypeLookup implements Iterable { return new FieldTypeLookup(fullName, aliases); } - /** Returns the field for the given field */ public MappedFieldType get(String field) { String concreteField = aliasToConcreteName.getOrDefault(field, field); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 487a6ac4789..fc7c94372f1 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; @@ -46,8 +47,13 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSortConfig; +import org.elasticsearch.index.analysis.AnalysisRegistry; +import org.elasticsearch.index.analysis.CharFilterFactory; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; +import org.elasticsearch.index.analysis.ReloadableCustomAnalyzer; +import org.elasticsearch.index.analysis.TokenFilterFactory; +import org.elasticsearch.index.analysis.TokenizerFactory; import org.elasticsearch.index.mapper.Mapper.BuilderContext; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.similarity.SimilarityService; @@ -843,4 +849,20 @@ public class MapperService extends AbstractIndexComponent implements Closeable { return defaultAnalyzer; } } + + public synchronized void reloadSearchAnalyzers(AnalysisRegistry registry) throws IOException { + logger.info("reloading search analyzers"); + // refresh indexAnalyzers and search analyzers + final Map tokenizerFactories = registry.buildTokenizerFactories(indexSettings); + final Map charFilterFactories = registry.buildCharFilterFactories(indexSettings); + final Map tokenFilterFactories = registry.buildTokenFilterFactories(indexSettings); + final Map settings = indexSettings.getSettings().getGroups("index.analysis.analyzer"); + for (NamedAnalyzer namedAnalyzer : indexAnalyzers.getAnalyzers().values()) { + if (namedAnalyzer.analyzer() instanceof ReloadableCustomAnalyzer) { + ReloadableCustomAnalyzer analyzer = (ReloadableCustomAnalyzer) namedAnalyzer.analyzer(); + Settings analyzerSettings = settings.get(namedAnalyzer.name()); + analyzer.reload(namedAnalyzer.name(), analyzerSettings, tokenizerFactories, 
charFilterFactories, tokenFilterFactories); + } + } + } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FragmentBuilderHelper.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FragmentBuilderHelper.java index 583516c5cd4..d896d3a9d92 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FragmentBuilderHelper.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FragmentBuilderHelper.java @@ -26,7 +26,7 @@ import org.apache.lucene.search.vectorhighlight.FieldFragList.WeightedFragInfo; import org.apache.lucene.search.vectorhighlight.FieldFragList.WeightedFragInfo.SubInfo; import org.apache.lucene.search.vectorhighlight.FragmentsBuilder; import org.apache.lucene.util.CollectionUtil; -import org.elasticsearch.index.analysis.CustomAnalyzer; +import org.elasticsearch.index.analysis.AnalyzerComponentsProvider; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.index.mapper.MappedFieldType; @@ -81,9 +81,8 @@ public final class FragmentBuilderHelper { if (analyzer instanceof NamedAnalyzer) { analyzer = ((NamedAnalyzer) analyzer).analyzer(); } - if (analyzer instanceof CustomAnalyzer) { - final CustomAnalyzer a = (CustomAnalyzer) analyzer; - TokenFilterFactory[] tokenFilters = a.tokenFilters(); + if (analyzer instanceof AnalyzerComponentsProvider) { + final TokenFilterFactory[] tokenFilters = ((AnalyzerComponentsProvider) analyzer).getComponents().getTokenFilters(); for (TokenFilterFactory tokenFilterFactory : tokenFilters) { if (tokenFilterFactory.breaksFastVectorHighlighter()) { return true; diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java index 74b9437d678..5b66badc733 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java @@ -31,7 +31,7 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; -import org.elasticsearch.index.analysis.CustomAnalyzer; +import org.elasticsearch.index.analysis.AnalyzerComponentsProvider; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.ShingleTokenFilterFactory; import org.elasticsearch.index.analysis.TokenFilterFactory; @@ -675,9 +675,8 @@ public class PhraseSuggestionBuilder extends SuggestionBuilder= 0 ? 
+            assertEquals(offsetGap >= 0 ? offsetGap : 1, analyzer.getOffsetGap(randomAlphaOfLength(5)));
+            assertEquals("standard", analyzer.getComponents().getTokenizerName());
+            assertEquals(0, analyzer.getComponents().getCharFilters().length);
+            assertSame(testAnalysis.tokenizer.get("standard"), analyzer.getComponents().getTokenizerFactory());
+            assertEquals(1, analyzer.getComponents().getTokenFilters().length);
+            assertSame(NO_OP_SEARCH_TIME_FILTER, analyzer.getComponents().getTokenFilters()[0]);
+        }
+
+        // check that when using regular non-search time filters only, we get an exception
+        final Settings indexAnalyzerSettings = Settings.builder()
+            .put("tokenizer", "standard")
+            .putList("filter", "lowercase")
+            .build();
+        AnalyzerComponents indexAnalyzerComponents = createComponents("my_analyzer", indexAnalyzerSettings, testAnalysis.tokenizer,
+            testAnalysis.charFilter, testAnalysis.tokenFilter);
+        IllegalArgumentException ex = expectThrows(IllegalArgumentException.class,
+            () -> new ReloadableCustomAnalyzer(indexAnalyzerComponents, positionIncrementGap, offsetGap));
+        assertEquals("ReloadableCustomAnalyzer must only be initialized with analysis components in AnalysisMode.SEARCH_TIME mode",
+            ex.getMessage());
+    }
+
+    /**
+     * Start multiple threads that create token streams from this analyzer until the reloaded token filter takes effect.
+     */
+    public void testReloading() throws IOException, InterruptedException {
+        Settings analyzerSettings = Settings.builder()
+            .put("tokenizer", "standard")
+            .putList("filter", "my_filter")
+            .build();
+
+        AnalyzerComponents components = createComponents("my_analyzer", analyzerSettings, testAnalysis.tokenizer, testAnalysis.charFilter,
+            Collections.singletonMap("my_filter", NO_OP_SEARCH_TIME_FILTER));
+        int numThreads = randomIntBetween(5, 10);
+
+        ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
+        CountDownLatch firstCheckpoint = new CountDownLatch(numThreads);
+        CountDownLatch secondCheckpoint = new CountDownLatch(numThreads);
+
+        try (ReloadableCustomAnalyzer analyzer = new ReloadableCustomAnalyzer(components, 0, 0)) {
+            executorService.submit(() -> {
+                while (secondCheckpoint.getCount() > 0) {
+                    try (TokenStream firstTokenStream = analyzer.tokenStream("myField", "TEXT")) {
+                        firstTokenStream.reset();
+                        CharTermAttribute term = firstTokenStream.addAttribute(CharTermAttribute.class);
+                        assertTrue(firstTokenStream.incrementToken());
+                        if (term.toString().equals("TEXT")) {
+                            firstCheckpoint.countDown();
+                        }
+                        if (term.toString().equals("text")) {
+                            secondCheckpoint.countDown();
+                        }
+                        assertFalse(firstTokenStream.incrementToken());
+                        firstTokenStream.end();
+                    } catch (Exception e) {
+                        throw ExceptionsHelper.convertToRuntime(e);
+                    }
+                }
+            });
+
+            // wait until all running threads have seen the unaltered upper case analysis at least once
+            assertTrue(firstCheckpoint.await(5, TimeUnit.SECONDS));
+
+            analyzer.reload("my_analyzer", analyzerSettings, testAnalysis.tokenizer, testAnalysis.charFilter,
+                Collections.singletonMap("my_filter", LOWERCASE_SEARCH_TIME_FILTER));
+
+            // wait until all running threads have seen the new lower case analysis at least once
+            assertTrue(secondCheckpoint.await(5, TimeUnit.SECONDS));
+
+            executorService.shutdown();
+            executorService.awaitTermination(1, TimeUnit.SECONDS);
+        }
+    }
+}
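The testReloading test above uses a two-checkpoint latch technique worth calling out: every worker counts the first latch down once it has seen the pre-reload output ("TEXT") and the second once it has seen the post-reload output ("text"), so the test asserts eventual visibility without depending on any fixed reload timing. Stripped down to its skeleton (a sketch, assuming the usual java.util.concurrent and java.util.function imports):

    // Sketch: workers poll until they have observed both the old and the new behaviour.
    static Runnable checkpointWorker(Supplier<String> analyzeOnce, CountDownLatch sawOld, CountDownLatch sawNew) {
        return () -> {
            while (sawNew.getCount() > 0) {
                String token = analyzeOnce.get(); // yields "TEXT" before the reload, "text" after
                if (token.equals("TEXT")) {
                    sawOld.countDown();
                } else if (token.equals("text")) {
                    sawNew.countDown();
                }
            }
        };
    }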
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java
index d8c120e492d..8e03c12cfbe 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java
@@ -19,7 +19,9 @@
 package org.elasticsearch.index.mapper;
 
+import org.apache.lucene.analysis.TokenStream;
 import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.compress.CompressedXContent;
@@ -27,18 +29,30 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.analysis.AnalysisMode;
+import org.elasticsearch.index.analysis.AnalysisRegistry;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.analysis.ReloadableCustomAnalyzer;
+import org.elasticsearch.index.analysis.TokenFilterFactory;
 import org.elasticsearch.index.mapper.KeywordFieldMapper.KeywordFieldType;
 import org.elasticsearch.index.mapper.MapperService.MergeReason;
 import org.elasticsearch.index.mapper.NumberFieldMapper.NumberFieldType;
 import org.elasticsearch.indices.InvalidTypeNameException;
+import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
+import org.elasticsearch.plugins.AnalysisPlugin;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESSingleNodeTestCase;
 import org.elasticsearch.test.InternalSettingsPlugin;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Map;
 import java.util.concurrent.ExecutionException;
 
 import static org.hamcrest.CoreMatchers.containsString;
@@ -49,7 +63,7 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
 
     @Override
     protected Collection<Class<? extends Plugin>> getPlugins() {
-        return Collections.singleton(InternalSettingsPlugin.class);
+        return Arrays.asList(InternalSettingsPlugin.class, ReloadableFilterPlugin.class);
     }
 
     public void testTypeNameStartsWithIllegalDot() {
@@ -434,4 +448,97 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
         assertEquals(testString, documentMapper.mappers().getMapper(testString).simpleName());
     }
 
+    public void testReloadSearchAnalyzers() throws IOException {
+        Settings settings = Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+                .put("index.analysis.analyzer.reloadableAnalyzer.type", "custom")
+                .put("index.analysis.analyzer.reloadableAnalyzer.tokenizer", "standard")
+                .putList("index.analysis.analyzer.reloadableAnalyzer.filter", "myReloadableFilter").build();
+
+        MapperService mapperService = createIndex("test_index", settings).mapperService();
+        CompressedXContent mapping = new CompressedXContent(BytesReference.bytes(
+                XContentFactory.jsonBuilder().startObject().startObject("_doc")
+                        .startObject("properties")
+                            .startObject("field")
+                                .field("type", "text")
+                                .field("analyzer", "simple")
+                                .field("search_analyzer", "reloadableAnalyzer")
+                                .field("search_quote_analyzer", "stop")
+                            .endObject()
+                            .startObject("otherField")
+                                .field("type", "text")
+                                .field("analyzer", "standard")
+                                .field("search_analyzer", "simple")
.field("search_quote_analyzer", "reloadableAnalyzer") + .endObject() + .endObject() + .endObject().endObject())); + + mapperService.merge("_doc", mapping, MergeReason.MAPPING_UPDATE); + IndexAnalyzers current = mapperService.getIndexAnalyzers(); + + ReloadableCustomAnalyzer originalReloadableAnalyzer = (ReloadableCustomAnalyzer) current.get("reloadableAnalyzer").analyzer(); + TokenFilterFactory[] originalTokenFilters = originalReloadableAnalyzer.getComponents().getTokenFilters(); + assertEquals(1, originalTokenFilters.length); + assertEquals("myReloadableFilter", originalTokenFilters[0].name()); + + // now reload, this should change the tokenfilterFactory inside the analyzer + mapperService.reloadSearchAnalyzers(getInstanceFromNode(AnalysisRegistry.class)); + IndexAnalyzers updatedAnalyzers = mapperService.getIndexAnalyzers(); + assertSame(current, updatedAnalyzers); + assertSame(current.getDefaultIndexAnalyzer(), updatedAnalyzers.getDefaultIndexAnalyzer()); + assertSame(current.getDefaultSearchAnalyzer(), updatedAnalyzers.getDefaultSearchAnalyzer()); + assertSame(current.getDefaultSearchQuoteAnalyzer(), updatedAnalyzers.getDefaultSearchQuoteAnalyzer()); + + assertFalse(assertSameContainedFilters(originalTokenFilters, current.get("reloadableAnalyzer"))); + assertFalse(assertSameContainedFilters(originalTokenFilters, mapperService.fullName("field").searchAnalyzer())); + assertFalse(assertSameContainedFilters(originalTokenFilters, mapperService.fullName("otherField").searchQuoteAnalyzer())); + } + + private boolean assertSameContainedFilters(TokenFilterFactory[] originalTokenFilter, NamedAnalyzer updatedAnalyzer) { + ReloadableCustomAnalyzer updatedReloadableAnalyzer = (ReloadableCustomAnalyzer) updatedAnalyzer.analyzer(); + TokenFilterFactory[] newTokenFilters = updatedReloadableAnalyzer.getComponents().getTokenFilters(); + assertEquals(originalTokenFilter.length, newTokenFilters.length); + int i = 0; + for (TokenFilterFactory tf : newTokenFilters ) { + assertEquals(originalTokenFilter[i].name(), tf.name()); + if (originalTokenFilter[i] != tf) { + return false; + } + i++; + } + return true; + } + + public static final class ReloadableFilterPlugin extends Plugin implements AnalysisPlugin { + + @Override + public Map> getTokenFilters() { + return Collections.singletonMap("myReloadableFilter", new AnalysisProvider() { + + @Override + public TokenFilterFactory get(IndexSettings indexSettings, Environment environment, String name, Settings settings) + throws IOException { + return new TokenFilterFactory() { + + @Override + public String name() { + return "myReloadableFilter"; + } + + @Override + public TokenStream create(TokenStream tokenStream) { + return tokenStream; + } + + @Override + public AnalysisMode getAnalysisMode() { + return AnalysisMode.SEARCH_TIME; + } + }; + } + }); + } + } + } diff --git a/server/src/test/java/org/elasticsearch/action/support/broadcast/AbstractBroadcastResponseTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBroadcastResponseTestCase.java similarity index 98% rename from server/src/test/java/org/elasticsearch/action/support/broadcast/AbstractBroadcastResponseTestCase.java rename to test/framework/src/main/java/org/elasticsearch/test/AbstractBroadcastResponseTestCase.java index 5bf48fa5897..87084577baa 100644 --- a/server/src/test/java/org/elasticsearch/action/support/broadcast/AbstractBroadcastResponseTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBroadcastResponseTestCase.java @@ -17,10 +17,11 @@ * 
  * under the License.
  */
 
-package org.elasticsearch.action.support.broadcast;
+package org.elasticsearch.test;
 
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.ToXContent;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
index 2038b35b4e6..f0a871df295 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
@@ -56,13 +56,16 @@ import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.snapshots.SourceOnlySnapshotRepository;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.watcher.ResourceWatcherService;
+import org.elasticsearch.xpack.core.action.ReloadAnalyzerAction;
 import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction;
+import org.elasticsearch.xpack.core.action.TransportReloadAnalyzersAction;
 import org.elasticsearch.xpack.core.action.TransportXPackInfoAction;
 import org.elasticsearch.xpack.core.action.TransportXPackUsageAction;
 import org.elasticsearch.xpack.core.action.XPackInfoAction;
 import org.elasticsearch.xpack.core.action.XPackUsageAction;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.rest.action.RestFreezeIndexAction;
+import org.elasticsearch.xpack.core.rest.action.RestReloadAnalyzersAction;
 import org.elasticsearch.xpack.core.rest.action.RestXPackInfoAction;
 import org.elasticsearch.xpack.core.rest.action.RestXPackUsageAction;
 import org.elasticsearch.xpack.core.security.authc.TokenMetaData;
@@ -272,6 +275,7 @@ public class XPackPlugin extends XPackClientPlugin implements ExtensiblePlugin,
         actions.add(new ActionHandler<>(TransportFreezeIndexAction.FreezeIndexAction.INSTANCE,
             TransportFreezeIndexAction.class));
         actions.addAll(licensing.getActions());
+        actions.add(new ActionHandler<>(ReloadAnalyzerAction.INSTANCE, TransportReloadAnalyzersAction.class));
         return actions;
     }
 
@@ -298,6 +302,7 @@ public class XPackPlugin extends XPackClientPlugin implements ExtensiblePlugin,
         handlers.add(new RestXPackInfoAction(settings, restController));
         handlers.add(new RestXPackUsageAction(settings, restController));
         handlers.add(new RestFreezeIndexAction(settings, restController));
+        handlers.add(new RestReloadAnalyzersAction(settings, restController));
         handlers.addAll(licensing.getRestHandlers(settings, restController, clusterSettings, indexScopedSettings, settingsFilter,
             indexNameExpressionResolver, nodesInCluster));
         return handlers;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzerAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzerAction.java
new file mode 100644
index 00000000000..f37df1ec820
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzerAction.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.action;
+
+import org.elasticsearch.action.Action;
+
+public class ReloadAnalyzerAction extends Action<ReloadAnalyzersResponse> {
+
+    public static final ReloadAnalyzerAction INSTANCE = new ReloadAnalyzerAction();
+    public static final String NAME = "indices:admin/reload_analyzers";
+
+    private ReloadAnalyzerAction() {
+        super(NAME);
+    }
+
+    @Override
+    public ReloadAnalyzersResponse newResponse() {
+        return new ReloadAnalyzersResponse();
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersRequest.java
new file mode 100644
index 00000000000..8721abd3403
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersRequest.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.action;
+
+import org.elasticsearch.action.support.broadcast.BroadcastRequest;
+
+import java.util.Arrays;
+import java.util.Objects;
+
+/**
+ * Request for reloading index search analyzers
+ */
+public class ReloadAnalyzersRequest extends BroadcastRequest<ReloadAnalyzersRequest> {
+
+    /**
+     * Constructs a new request for reloading index search analyzers for one or more indices
+     */
+    public ReloadAnalyzersRequest(String... indices) {
+        super(indices);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        ReloadAnalyzersRequest that = (ReloadAnalyzersRequest) o;
+        return Objects.equals(indicesOptions(), that.indicesOptions())
+                && Arrays.equals(indices, that.indices);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(indicesOptions(), Arrays.hashCode(indices));
+    }
+
+}
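With the action and request classes in place (and the registrations added to XPackPlugin above), triggering a reload from Java is a one-liner; roughly (a sketch, assuming a Client handle; the index names are placeholders):

    ReloadAnalyzersResponse response = client
        .execute(ReloadAnalyzerAction.INSTANCE, new ReloadAnalyzersRequest("logs", "products"))
        .actionGet();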
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersResponse.java
new file mode 100644
index 00000000000..263dcf7debd
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersResponse.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.action;
+
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+
+/**
+ * The response object that will be returned when reloading analyzers
+ */
+public class ReloadAnalyzersResponse extends BroadcastResponse {
+
+    private final Map<String, List<String>> reloadedIndicesNodes;
+
+    public ReloadAnalyzersResponse() {
+        reloadedIndicesNodes = Collections.emptyMap();
+    }
+
+    public ReloadAnalyzersResponse(int totalShards, int successfulShards, int failedShards,
+            List<DefaultShardOperationFailedException> shardFailures, Map<String, List<String>> reloadedIndicesNodes) {
+        super(totalShards, successfulShards, failedShards, shardFailures);
+        this.reloadedIndicesNodes = reloadedIndicesNodes;
+    }
+
+    /**
+     * Override in subclass to add custom fields following the common `_shards` field
+     */
+    @Override
+    protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException {
+        builder.startArray("reloaded_nodes");
+        for (Entry<String, List<String>> indexNodesReloaded : reloadedIndicesNodes.entrySet()) {
+            builder.startObject();
+            builder.field("index", indexNodesReloaded.getKey());
+            builder.field("reloaded_node_ids", indexNodesReloaded.getValue());
+            builder.endObject();
+        }
+        builder.endArray();
+    }
+
+    @SuppressWarnings({ "unchecked" })
+    private static final ConstructingObjectParser<ReloadAnalyzersResponse, Void> PARSER = new ConstructingObjectParser<>("reload_analyzer",
+        true, arg -> {
+            BroadcastResponse response = (BroadcastResponse) arg[0];
+            List<Tuple<String, List<String>>> results = (List<Tuple<String, List<String>>>) arg[1];
+            Map<String, List<String>> reloadedNodeIds = new HashMap<>();
+            for (Tuple<String, List<String>> result : results) {
+                reloadedNodeIds.put(result.v1(), result.v2());
+            }
+            return new ReloadAnalyzersResponse(response.getTotalShards(), response.getSuccessfulShards(), response.getFailedShards(),
+                Arrays.asList(response.getShardFailures()), reloadedNodeIds);
+        });
+
+    @SuppressWarnings({ "unchecked" })
+    private static final ConstructingObjectParser<Tuple<String, List<String>>, Void> ENTRY_PARSER = new ConstructingObjectParser<>(
+        "reload_analyzer.entry", true, arg -> {
+            String index = (String) arg[0];
+            List<String> nodeIds = (List<String>) arg[1];
+            return new Tuple<>(index, nodeIds);
+        });
+
+    static {
+        declareBroadcastFields(PARSER);
+        PARSER.declareObjectArray(constructorArg(), ENTRY_PARSER, new ParseField("reloaded_nodes"));
+        ENTRY_PARSER.declareString(constructorArg(), new ParseField("index"));
+        ENTRY_PARSER.declareStringArray(constructorArg(), new ParseField("reloaded_node_ids"));
+    }
+
+    public static ReloadAnalyzersResponse fromXContent(XContentParser parser) {
+        return PARSER.apply(parser, null);
+    }
+}
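The wire format this response produces is asserted verbatim in ReloadAnalyzersResponseTests further down; round-tripping it through the parsers above looks roughly like this (a sketch; the enclosing method would declare throws IOException, and the x-content JSON imports are assumed):

    String json = "{\"_shards\":{\"total\":10,\"successful\":5,\"failed\":5},"
        + "\"reloaded_nodes\":[{\"index\":\"index\",\"reloaded_node_ids\":[\"nodeId\"]}]}";
    try (XContentParser parser = JsonXContent.jsonXContent.createParser(
            NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, json)) {
        ReloadAnalyzersResponse parsed = ReloadAnalyzersResponse.fromXContent(parser);
    }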
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportReloadAnalyzersAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportReloadAnalyzersAction.java
new file mode 100644
index 00000000000..d9c0b6f243d
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportReloadAnalyzersAction.java
@@ -0,0 +1,155 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.action;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.PlainShardsIterator;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardsIterator;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.action.TransportReloadAnalyzersAction.ReloadResult;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+
+/**
+ * Transport action that reloads the search analyzers of the requested indices, contacting one shard per node.
+ */
+public class TransportReloadAnalyzersAction
+        extends TransportBroadcastByNodeAction<ReloadAnalyzersRequest, ReloadAnalyzersResponse, ReloadResult> {
+
+    private static final Logger logger = LogManager.getLogger(TransportReloadAnalyzersAction.class);
+    private final IndicesService indicesService;
+
+    @Inject
+    public TransportReloadAnalyzersAction(ClusterService clusterService, TransportService transportService, IndicesService indicesService,
+            ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
+        super(ReloadAnalyzerAction.NAME, clusterService, transportService, actionFilters, indexNameExpressionResolver,
+                ReloadAnalyzersRequest::new, ThreadPool.Names.MANAGEMENT, false);
+        this.indicesService = indicesService;
+    }
+
+    @Override
+    protected ReloadResult readShardResult(StreamInput in) throws IOException {
+        ReloadResult reloadResult = new ReloadResult();
+        reloadResult.readFrom(in);
+        return reloadResult;
+    }
+
+    @Override
+    protected ReloadAnalyzersResponse newResponse(ReloadAnalyzersRequest request, int totalShards, int successfulShards, int failedShards,
+            List<ReloadResult> responses, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
+        Map<String, List<String>> reloadedIndicesNodes = new HashMap<String, List<String>>();
+        for (ReloadResult result : responses) {
+            if (reloadedIndicesNodes.containsKey(result.index)) {
+                List<String> nodes = reloadedIndicesNodes.get(result.index);
+                nodes.add(result.nodeId);
+            } else {
+                List<String> nodes = new ArrayList<>();
+                nodes.add(result.nodeId);
+                reloadedIndicesNodes.put(result.index, nodes);
+            }
+        }
+        return new ReloadAnalyzersResponse(totalShards, successfulShards, failedShards, shardFailures, reloadedIndicesNodes);
+    }
+
+    @Override
+    protected ReloadAnalyzersRequest readRequestFrom(StreamInput in) throws IOException {
+        final ReloadAnalyzersRequest request = new ReloadAnalyzersRequest();
+        request.readFrom(in);
+        return request;
+    }
+
+    @Override
+    protected ReloadResult shardOperation(ReloadAnalyzersRequest request, ShardRouting shardRouting) throws IOException {
+        logger.info("reloading analyzers for index shard " + shardRouting);
+        IndexService indexService = indicesService.indexService(shardRouting.index());
+        indexService.mapperService().reloadSearchAnalyzers(indicesService.getAnalysis());
+        return new ReloadResult(shardRouting.index().getName(), shardRouting.currentNodeId());
+    }
+
+    public static final class ReloadResult implements Streamable {
+        String index;
+        String nodeId;
+
+        private ReloadResult(String index, String nodeId) {
+            this.index = index;
+            this.nodeId = nodeId;
+        }
+
+        private ReloadResult() {
+        }
+
+        @Override
+        public void readFrom(StreamInput in) throws IOException {
+            this.index = in.readString();
+            this.nodeId = in.readString();
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeString(index);
+            out.writeString(nodeId);
+        }
+    }
+
+    /**
+     * The reload request should go to only one shard per node the index lives on
+     */
+    @Override
+    protected ShardsIterator shards(ClusterState clusterState, ReloadAnalyzersRequest request, String[] concreteIndices) {
+        RoutingTable routingTable = clusterState.routingTable();
+        List<ShardRouting> shards = new ArrayList<>();
+        for (String index : concreteIndices) {
+            Set<String> nodesCovered = new HashSet<>();
+            IndexRoutingTable indexRoutingTable = routingTable.index(index);
+            for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+                for (ShardRouting shardRouting : indexShardRoutingTable) {
+                    if (nodesCovered.contains(shardRouting.currentNodeId()) == false) {
+                        shards.add(shardRouting);
+                        nodesCovered.add(shardRouting.currentNodeId());
+                    }
+                }
+            }
+        }
+        return new PlainShardsIterator(shards);
+    }
+
+    @Override
+    protected ClusterBlockException checkGlobalBlock(ClusterState state, ReloadAnalyzersRequest request) {
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
+    }
+
+    @Override
+    protected ClusterBlockException checkRequestBlock(ClusterState state, ReloadAnalyzersRequest request, String[] concreteIndices) {
+        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices);
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestReloadAnalyzersAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestReloadAnalyzersAction.java
new file mode 100644
index 00000000000..3b379e8cebb
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestReloadAnalyzersAction.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.rest.action;
+
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.action.RestToXContentListener;
+import org.elasticsearch.xpack.core.action.ReloadAnalyzerAction;
+import org.elasticsearch.xpack.core.action.ReloadAnalyzersRequest;
+
+import java.io.IOException;
+public class RestReloadAnalyzersAction extends BaseRestHandler {
+
+    public RestReloadAnalyzersAction(Settings settings, RestController controller) {
+        super(settings);
+        controller.registerHandler(RestRequest.Method.GET, "/{index}/_reload_search_analyzers", this);
+        controller.registerHandler(RestRequest.Method.POST, "/{index}/_reload_search_analyzers", this);
+    }
+
+    @Override
+    public String getName() {
+        return "reload_search_analyzers_action";
+    }
+
+    @Override
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
+        ReloadAnalyzersRequest reloadAnalyzersRequest = new ReloadAnalyzersRequest(
+                Strings.splitStringByCommaToArray(request.param("index")));
+        reloadAnalyzersRequest.indicesOptions(IndicesOptions.fromRequest(request, reloadAnalyzersRequest.indicesOptions()));
+        return channel -> client.execute(ReloadAnalyzerAction.INSTANCE, reloadAnalyzersRequest, new RestToXContentListener<>(channel));
+    }
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersResponseTests.java
new file mode 100644
index 00000000000..cf1ad5909ba
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/ReloadAnalyzersResponseTests.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.action;
+
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractBroadcastResponseTestCase;
+import org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class ReloadAnalyzersResponseTests extends AbstractBroadcastResponseTestCase<ReloadAnalyzersResponse> {
+
+    @Override
+    protected ReloadAnalyzersResponse createTestInstance(int totalShards, int successfulShards, int failedShards,
+            List<DefaultShardOperationFailedException> failures) {
+        Map<String, List<String>> reloadedIndicesNodes = new HashMap<>();
+        int randomIndices = randomIntBetween(0, 5);
+        for (int i = 0; i < randomIndices; i++) {
+            List<String> randomNodeIds = Arrays.asList(generateRandomStringArray(5, 5, false, true));
+            reloadedIndicesNodes.put(randomAlphaOfLengthBetween(5, 10), randomNodeIds);
+        }
+        return new ReloadAnalyzersResponse(totalShards, successfulShards, failedShards, failures, reloadedIndicesNodes);
+    }
+
+    @Override
+    protected ReloadAnalyzersResponse doParseInstance(XContentParser parser) throws IOException {
+        return ReloadAnalyzersResponse.fromXContent(parser);
+    }
+
+    @Override
+    public void testToXContent() {
+        Map<String, List<String>> reloadedIndicesNodes = Collections.singletonMap("index", Collections.singletonList("nodeId"));
+        ReloadAnalyzersResponse response = new ReloadAnalyzersResponse(10, 5, 5, null, reloadedIndicesNodes);
+        String output = Strings.toString(response);
+        assertEquals(
+                "{\"_shards\":{\"total\":10,\"successful\":5,\"failed\":5},"
+                + "\"reloaded_nodes\":[{\"index\":\"index\",\"reloaded_node_ids\":[\"nodeId\"]}]"
+                + "}",
+                output);
+    }
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/ReloadSynonymAnalyzerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/ReloadSynonymAnalyzerTests.java
new file mode 100644
index 00000000000..e0e8de3d23d
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/ReloadSynonymAnalyzerTests.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.action;
+
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction.AnalyzeToken;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction.Response;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.analysis.common.CommonAnalysisPlugin;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin;
+import org.elasticsearch.xpack.core.action.ReloadAnalyzerAction;
+import org.elasticsearch.xpack.core.action.ReloadAnalyzersRequest;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+
+public class ReloadSynonymAnalyzerTests extends ESSingleNodeTestCase {
+
+    @Override
+    protected Collection<Class<? extends Plugin>> getPlugins() {
+        return Arrays.asList(LocalStateCompositeXPackPlugin.class, CommonAnalysisPlugin.class);
+    }
+
+    public void testSynonymsUpdateable() throws FileNotFoundException, IOException {
+        String synonymsFileName = "synonyms.txt";
+        Path configDir = node().getEnvironment().configFile();
+        if (Files.exists(configDir) == false) {
+            Files.createDirectory(configDir);
+        }
+        Path synonymsFile = configDir.resolve(synonymsFileName);
+        if (Files.exists(synonymsFile) == false) {
+            Files.createFile(synonymsFile);
+        }
+        try (PrintWriter out = new PrintWriter(
+                new OutputStreamWriter(Files.newOutputStream(synonymsFile, StandardOpenOption.WRITE), StandardCharsets.UTF_8))) {
+            out.println("foo, baz");
+        }
+
+        assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder()
+                .put("index.number_of_shards", 5)
+                .put("index.number_of_replicas", 0)
+                .put("analysis.analyzer.my_synonym_analyzer.tokenizer", "standard")
+                .putList("analysis.analyzer.my_synonym_analyzer.filter", "lowercase", "my_synonym_filter")
+                .put("analysis.filter.my_synonym_filter.type", "synonym")
+                .put("analysis.filter.my_synonym_filter.updateable", "true")
+                .put("analysis.filter.my_synonym_filter.synonyms_path", synonymsFileName))
+                .addMapping("_doc", "field", "type=text,analyzer=standard,search_analyzer=my_synonym_analyzer"));
+
+        client().prepareIndex("test", "_doc", "1").setSource("field", "Foo").get();
+        assertNoFailures(client().admin().indices().prepareRefresh("test").execute().actionGet());
+
+        SearchResponse response = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "baz")).get();
+        assertHitCount(response, 1L);
+        response = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "buzz")).get();
+        assertHitCount(response, 0L);
+        Response analyzeResponse = client().admin().indices().prepareAnalyze("test", "foo").setAnalyzer("my_synonym_analyzer").get();
+        assertEquals(2, analyzeResponse.getTokens().size());
+        assertEquals("foo", analyzeResponse.getTokens().get(0).getTerm());
assertEquals("baz", analyzeResponse.getTokens().get(1).getTerm()); + + // now update synonyms file and trigger reloading + try (PrintWriter out = new PrintWriter( + new OutputStreamWriter(Files.newOutputStream(synonymsFile, StandardOpenOption.WRITE), StandardCharsets.UTF_8))) { + out.println("foo, baz, buzz"); + } + assertNoFailures(client().execute(ReloadAnalyzerAction.INSTANCE, new ReloadAnalyzersRequest("test")).actionGet()); + + analyzeResponse = client().admin().indices().prepareAnalyze("test", "Foo").setAnalyzer("my_synonym_analyzer").get(); + assertEquals(3, analyzeResponse.getTokens().size()); + Set tokens = new HashSet<>(); + analyzeResponse.getTokens().stream().map(AnalyzeToken::getTerm).forEach(t -> tokens.add(t)); + assertTrue(tokens.contains("foo")); + assertTrue(tokens.contains("baz")); + assertTrue(tokens.contains("buzz")); + + response = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "baz")).get(); + assertHitCount(response, 1L); + response = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "buzz")).get(); + assertHitCount(response, 1L); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rest/action/ReloadSynonymAnalyzerIT.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rest/action/ReloadSynonymAnalyzerIT.java new file mode 100644 index 00000000000..790fefb7437 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rest/action/ReloadSynonymAnalyzerIT.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rest/action/ReloadSynonymAnalyzerIT.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rest/action/ReloadSynonymAnalyzerIT.java
new file mode 100644
index 00000000000..790fefb7437
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rest/action/ReloadSynonymAnalyzerIT.java
@@ -0,0 +1,120 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.rest.action;
+
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction.AnalyzeToken;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction.Response;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.analysis.common.CommonAnalysisPlugin;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin;
+import org.elasticsearch.xpack.core.XPackSettings;
+import org.elasticsearch.xpack.core.action.ReloadAnalyzerAction;
+import org.elasticsearch.xpack.core.action.ReloadAnalyzersRequest;
+import org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+
+public class ReloadSynonymAnalyzerIT extends ESIntegTestCase {
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(XPackSettings.SECURITY_ENABLED.getKey(), false).build();
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return Arrays.asList(LocalStateCompositeXPackPlugin.class, CommonAnalysisPlugin.class);
+    }
+
+    /**
+     * This test needs to write to the config directory, which is difficult in an external cluster, so we override this to force
+     * running with an {@link InternalTestCluster}.
+     */
+    @Override
+    protected boolean ignoreExternalCluster() {
+        return true;
+    }
+
+    public void testSynonymsUpdateable() throws FileNotFoundException, IOException, InterruptedException {
+        Path config = internalCluster().getInstance(Environment.class).configFile();
+        String synonymsFileName = "synonyms.txt";
+        Path synonymsFile = config.resolve(synonymsFileName);
+        Files.createFile(synonymsFile);
+        assertTrue(Files.exists(synonymsFile));
+        try (PrintWriter out = new PrintWriter(
+                new OutputStreamWriter(Files.newOutputStream(synonymsFile, StandardOpenOption.CREATE), StandardCharsets.UTF_8))) {
+            out.println("foo, baz");
+        }
+        assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder()
+                .put("index.number_of_shards", cluster().numDataNodes() * 2)
+                .put("index.number_of_replicas", 1)
+                .put("analysis.analyzer.my_synonym_analyzer.tokenizer", "standard")
+                .put("analysis.analyzer.my_synonym_analyzer.filter", "my_synonym_filter")
+                .put("analysis.filter.my_synonym_filter.type", "synonym")
+                .put("analysis.filter.my_synonym_filter.updateable", "true")
+                .put("analysis.filter.my_synonym_filter.synonyms_path", synonymsFileName))
+                .addMapping("_doc", "field", "type=text,analyzer=standard,search_analyzer=my_synonym_analyzer"));
+
+        client().prepareIndex("test", "_doc", "1").setSource("field", "foo").get();
+        assertNoFailures(client().admin().indices().prepareRefresh("test").execute().actionGet());
+
+        SearchResponse response = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "baz")).get();
+        assertHitCount(response, 1L);
+        response = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "buzz")).get();
+        assertHitCount(response, 0L);
+        Response analyzeResponse = client().admin().indices().prepareAnalyze("test", "foo").setAnalyzer("my_synonym_analyzer").get();
+        assertEquals(2, analyzeResponse.getTokens().size());
+        assertEquals("foo", analyzeResponse.getTokens().get(0).getTerm());
+        assertEquals("baz", analyzeResponse.getTokens().get(1).getTerm());
+
+        // now update synonyms file several times and trigger reloading
+        for (int i = 0; i < 10; i++) {
+            String testTerm = randomAlphaOfLength(10);
+            try (PrintWriter out = new PrintWriter(
+                    new OutputStreamWriter(Files.newOutputStream(synonymsFile, StandardOpenOption.WRITE), StandardCharsets.UTF_8))) {
+                out.println("foo, baz, " + testTerm);
+            }
+            ReloadAnalyzersResponse reloadResponse = client().execute(ReloadAnalyzerAction.INSTANCE, new ReloadAnalyzersRequest("test"))
+                    .actionGet();
+            assertNoFailures(reloadResponse);
+            assertEquals(cluster().numDataNodes(), reloadResponse.getSuccessfulShards());
+
+            analyzeResponse = client().admin().indices().prepareAnalyze("test", "foo").setAnalyzer("my_synonym_analyzer").get();
+            assertEquals(3, analyzeResponse.getTokens().size());
+            Set<String> tokens = new HashSet<>();
+            analyzeResponse.getTokens().stream().map(AnalyzeToken::getTerm).forEach(t -> tokens.add(t));
+            assertTrue(tokens.contains("foo"));
+            assertTrue(tokens.contains("baz"));
+            assertTrue(tokens.contains(testTerm));
+
+            response = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "baz")).get();
+            assertHitCount(response, 1L);
+            response = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", testTerm)).get();
+            assertHitCount(response, 1L);
+        }
+    }
+}
\ No newline at end of file
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/indices.reload_search_analyzers.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/indices.reload_search_analyzers.json
new file mode 100644
index 00000000000..bd79dbf4718
--- /dev/null
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/indices.reload_search_analyzers.json
@@ -0,0 +1,33 @@
+{
+  "indices.reload_search_analyzers": {
+    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-reload-analyzers.html",
+    "stability": "experimental",
+    "methods": ["GET", "POST"],
+    "url": {
+      "paths": ["/{index}/_reload_search_analyzers"],
+      "parts": {
+        "index": {
+          "type": "list",
+          "description" : "A comma-separated list of index names to reload analyzers for"
+        }
+      },
+      "params": {
+        "ignore_unavailable": {
+          "type" : "boolean",
+          "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+        },
+        "allow_no_indices": {
+          "type" : "boolean",
+          "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+        },
+        "expand_wildcards": {
+          "type" : "enum",
+          "options" : ["open","closed","none","all"],
+          "default" : "open",
+          "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ } + } + }, + "body": null + } +} From cab879118d429544c1fc53b976313792317b5944 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Fri, 28 Jun 2019 13:28:03 +0300 Subject: [PATCH 23/42] [7.x][ML] Support multiple source indices for df-analytics (#43702) (#43731) This commit adds support for multiple source indices. In order to deal with multiple indices having different mappings, it attempts a best-effort approach to merge the mappings assuming there are no conflicts. In case conflicts exists an error will be returned. To allow users creating custom mappings for special use cases, the destination index is now allowed to exist before the analytics job runs. In addition, settings are no longer copied except for the `index.number_of_shards` and `index.number_of_replicas`. --- .../dataframe/DataFrameAnalyticsSource.java | 23 ++- .../MlClientDocumentationIT.java | 16 +- .../DataFrameAnalyticsSourceTests.java | 2 +- .../dataframe/DataFrameAnalyticsSource.java | 31 ++-- .../xpack/core/ml/job/messages/Messages.java | 3 +- .../DataFrameAnalyticsSourceTests.java | 2 +- .../ml/qa/ml-with-security/build.gradle | 3 +- .../integration/RunDataFrameAnalyticsIT.java | 115 ++++++++++++- .../xpack/ml/MachineLearning.java | 2 +- ...ransportStartDataFrameAnalyticsAction.java | 33 ++-- .../ml/dataframe/DataFrameAnalyticsIndex.java | 158 +++++++++++++----- .../dataframe/DataFrameAnalyticsManager.java | 37 +++- .../xpack/ml/dataframe/MappingsMerger.java | 100 +++++++++++ .../ml/dataframe/SourceDestValidator.java | 11 +- .../DataFrameDataExtractorFactory.java | 8 +- .../extractor/ExtractedFieldsDetector.java | 29 ++-- .../DataFrameAnalyticsIndexTests.java | 142 ++++++++++------ .../ml/dataframe/MappingsMergerTests.java | 153 +++++++++++++++++ .../dataframe/SourceDestValidatorTests.java | 20 ++- .../ExtractedFieldsDetectorTests.java | 6 +- .../test/ml/data_frame_analytics_crud.yml | 34 +++- 21 files changed, 741 insertions(+), 187 deletions(-) create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/MappingsMerger.java create mode 100644 x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/MappingsMergerTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsSource.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsSource.java index c36799cd3b4..9a6de159bea 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsSource.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsSource.java @@ -28,6 +28,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.util.Arrays; +import java.util.List; import java.util.Objects; public class DataFrameAnalyticsSource implements ToXContentObject { @@ -46,19 +48,19 @@ public class DataFrameAnalyticsSource implements ToXContentObject { private static ObjectParser PARSER = new ObjectParser<>("data_frame_analytics_source", true, Builder::new); static { - PARSER.declareString(Builder::setIndex, INDEX); + PARSER.declareStringArray(Builder::setIndex, INDEX); PARSER.declareObject(Builder::setQueryConfig, (p, c) -> QueryConfig.fromXContent(p), QUERY); } - private final String index; + private final String[] index; private final QueryConfig queryConfig; - private DataFrameAnalyticsSource(String index, @Nullable QueryConfig 
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsSource.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsSource.java
index c36799cd3b4..9a6de159bea 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsSource.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsSource.java
@@ -28,6 +28,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
 import java.util.Objects;
 
 public class DataFrameAnalyticsSource implements ToXContentObject {
@@ -46,19 +48,19 @@ public class DataFrameAnalyticsSource implements ToXContentObject {
     private static ObjectParser<Builder, Void> PARSER = new ObjectParser<>("data_frame_analytics_source", true, Builder::new);
 
     static {
-        PARSER.declareString(Builder::setIndex, INDEX);
+        PARSER.declareStringArray(Builder::setIndex, INDEX);
         PARSER.declareObject(Builder::setQueryConfig, (p, c) -> QueryConfig.fromXContent(p), QUERY);
     }
 
-    private final String index;
+    private final String[] index;
     private final QueryConfig queryConfig;
 
-    private DataFrameAnalyticsSource(String index, @Nullable QueryConfig queryConfig) {
+    private DataFrameAnalyticsSource(String[] index, @Nullable QueryConfig queryConfig) {
         this.index = Objects.requireNonNull(index);
         this.queryConfig = queryConfig;
     }
 
-    public String getIndex() {
+    public String[] getIndex() {
         return index;
     }
 
@@ -83,13 +85,13 @@ public class DataFrameAnalyticsSource implements ToXContentObject {
         if (o == null || getClass() != o.getClass()) return false;
 
         DataFrameAnalyticsSource other = (DataFrameAnalyticsSource) o;
-        return Objects.equals(index, other.index)
+        return Arrays.equals(index, other.index)
             && Objects.equals(queryConfig, other.queryConfig);
     }
 
     @Override
     public int hashCode() {
-        return Objects.hash(index, queryConfig);
+        return Objects.hash(Arrays.asList(index), queryConfig);
     }
 
     @Override
@@ -99,16 +101,21 @@ public class DataFrameAnalyticsSource implements ToXContentObject {
 
     public static class Builder {
 
-        private String index;
+        private String[] index;
         private QueryConfig queryConfig;
 
         private Builder() {}
 
-        public Builder setIndex(String index) {
+        public Builder setIndex(String... index) {
             this.index = index;
             return this;
         }
 
+        public Builder setIndex(List<String> index) {
+            this.index = index.toArray(new String[0]);
+            return this;
+        }
+
         public Builder setQueryConfig(QueryConfig queryConfig) {
             this.queryConfig = queryConfig;
             return this;
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java
index 526e31a5da1..5c9017b7706 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java
@@ -2802,7 +2802,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
     }
 
     public void testGetDataFrameAnalytics() throws Exception {
-        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex());
+        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]);
 
         RestHighLevelClient client = highLevelClient();
         client.machineLearning().putDataFrameAnalytics(new PutDataFrameAnalyticsRequest(DF_ANALYTICS_CONFIG), RequestOptions.DEFAULT);
@@ -2851,7 +2851,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
     }
 
     public void testGetDataFrameAnalyticsStats() throws Exception {
-        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex());
+        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]);
 
         RestHighLevelClient client = highLevelClient();
         client.machineLearning().putDataFrameAnalytics(new PutDataFrameAnalyticsRequest(DF_ANALYTICS_CONFIG), RequestOptions.DEFAULT);
@@ -2901,7 +2901,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
     }
 
     public void testPutDataFrameAnalytics() throws Exception {
-        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex());
+        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]);
 
         RestHighLevelClient client = highLevelClient();
         {
@@ -2994,7 +2994,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
     }
 
     public void testDeleteDataFrameAnalytics() throws Exception {
-        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex());
+        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]);
 
         RestHighLevelClient client = highLevelClient();
         client.machineLearning().putDataFrameAnalytics(new PutDataFrameAnalyticsRequest(DF_ANALYTICS_CONFIG), RequestOptions.DEFAULT);
@@ -3044,9 +3044,9 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
     }
 
     public void testStartDataFrameAnalytics() throws Exception {
-        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex());
+        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]);
         highLevelClient().index(
-            new IndexRequest(DF_ANALYTICS_CONFIG.getSource().getIndex()).source(XContentType.JSON, "total", 10000)
+            new IndexRequest(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]).source(XContentType.JSON, "total", 10000)
                 .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT);
         RestHighLevelClient client = highLevelClient();
         client.machineLearning().putDataFrameAnalytics(new PutDataFrameAnalyticsRequest(DF_ANALYTICS_CONFIG), RequestOptions.DEFAULT);
@@ -3101,9 +3101,9 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
     }
 
     public void testStopDataFrameAnalytics() throws Exception {
-        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex());
+        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]);
         highLevelClient().index(
-            new IndexRequest(DF_ANALYTICS_CONFIG.getSource().getIndex()).source(XContentType.JSON, "total", 10000)
+            new IndexRequest(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]).source(XContentType.JSON, "total", 10000)
                 .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT);
         RestHighLevelClient client = highLevelClient();
         client.machineLearning().putDataFrameAnalytics(new PutDataFrameAnalyticsRequest(DF_ANALYTICS_CONFIG), RequestOptions.DEFAULT);
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsSourceTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsSourceTests.java
index eb254fd23de..c556b2e053c 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsSourceTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsSourceTests.java
@@ -36,7 +36,7 @@ public class DataFrameAnalyticsSourceTests extends AbstractXContentTestCase<DataFrameAnalyticsSource> {
     private static ConstructingObjectParser<DataFrameAnalyticsSource, Void> createParser(boolean ignoreUnknownFields) {
         ConstructingObjectParser<DataFrameAnalyticsSource, Void> parser = new ConstructingObjectParser<>("data_frame_analytics_source",
-            ignoreUnknownFields, a -> new DataFrameAnalyticsSource((String) a[0], (QueryProvider) a[1]));
-        parser.declareString(ConstructingObjectParser.constructorArg(), INDEX);
+            ignoreUnknownFields, a -> new DataFrameAnalyticsSource(((List<String>) a[0]).toArray(new String[0]), (QueryProvider) a[1]));
+        parser.declareStringArray(ConstructingObjectParser.constructorArg(), INDEX);
         parser.declareObject(ConstructingObjectParser.optionalConstructorArg(),
             (p, c) -> QueryProvider.fromXContent(p, ignoreUnknownFields, Messages.DATA_FRAME_ANALYTICS_BAD_QUERY_FORMAT), QUERY);
         return parser;
     }
 
-    private final String index;
+    private final String[] index;
     private final QueryProvider queryProvider;
 
-    public DataFrameAnalyticsSource(String index, @Nullable QueryProvider queryProvider) {
+    public DataFrameAnalyticsSource(String[] index, @Nullable QueryProvider queryProvider) {
         this.index = ExceptionsHelper.requireNonNull(index, INDEX);
-        if (index.isEmpty()) {
-            throw ExceptionsHelper.badRequestException("[{}] must be non-empty", INDEX);
+        if (index.length == 0) {
+            throw new IllegalArgumentException("source.index must specify at least one index");
+        }
+        if (Arrays.stream(index).anyMatch(Strings::isNullOrEmpty)) {
+            throw new IllegalArgumentException("source.index must contain non-null and non-empty strings");
strings"); } this.queryProvider = queryProvider == null ? QueryProvider.defaultQuery() : queryProvider; } public DataFrameAnalyticsSource(StreamInput in) throws IOException { - index = in.readString(); + index = in.readStringArray(); queryProvider = QueryProvider.fromStream(in); } public DataFrameAnalyticsSource(DataFrameAnalyticsSource other) { - this.index = other.index; + this.index = Arrays.copyOf(other.index, other.index.length); this.queryProvider = new QueryProvider(other.queryProvider); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(index); + out.writeStringArray(index); queryProvider.writeTo(out); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(INDEX.getPreferredName(), index); + builder.array(INDEX.getPreferredName(), index); builder.field(QUERY.getPreferredName(), queryProvider.getQuery()); builder.endObject(); return builder; @@ -83,16 +88,16 @@ public class DataFrameAnalyticsSource implements Writeable, ToXContentObject { if (o == null || getClass() != o.getClass()) return false; DataFrameAnalyticsSource other = (DataFrameAnalyticsSource) o; - return Objects.equals(index, other.index) + return Arrays.equals(index, other.index) && Objects.equals(queryProvider, other.queryProvider); } @Override public int hashCode() { - return Objects.hash(index, queryProvider); + return Objects.hash(Arrays.asList(index), queryProvider); } - public String getIndex() { + public String[] getIndex() { return index; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java index 417184f8a75..dfb95d2adac 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -51,8 +51,7 @@ public final class Messages { public static final String DATAFEED_ID_ALREADY_TAKEN = "A datafeed with id [{0}] already exists"; public static final String DATA_FRAME_ANALYTICS_BAD_QUERY_FORMAT = "Data Frame Analytics config query is not parsable"; - public static final String DATA_FRAME_ANALYTICS_BAD_FIELD_FILTER = - "No compatible fields could be detected in index [{0}] with name [{1}]"; + public static final String DATA_FRAME_ANALYTICS_BAD_FIELD_FILTER = "No field [{0}] could be detected"; public static final String FILTER_CANNOT_DELETE = "Cannot delete filter [{0}] currently used by jobs {1}"; public static final String FILTER_CONTAINS_TOO_MANY_ITEMS = "Filter [{0}] contains too many items; up to [{1}] items are allowed"; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsSourceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsSourceTests.java index 7783354d425..1db3477111d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsSourceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsSourceTests.java @@ -44,7 +44,7 @@ public class DataFrameAnalyticsSourceTests extends AbstractSerializingTestCase validateListener = ActionListener.wrap( + ActionListener configListener = ActionListener.wrap( config -> memoryTracker.addDataFrameAnalyticsJobMemoryAndRefreshAllOthers( request.getId(), 
config.getModelMemoryLimit().getBytes(), memoryRequirementRefreshListener), listener::onFailure ); - // Validate config - ActionListener configListener = ActionListener.wrap( - config -> { - new SourceDestValidator(clusterService.state(), indexNameExpressionResolver).check(config); - DataFrameDataExtractorFactory.validateConfigAndSourceIndex(client, config, validateListener); - }, - listener::onFailure + // Get config + getConfigAndValidate(request.getId(), configListener); + } + + private void getConfigAndValidate(String id, ActionListener finalListener) { + // Validate mappings can be merged + ActionListener firstValidationListener = ActionListener.wrap( + config -> MappingsMerger.mergeMappings(client, config.getHeaders(), config.getSource().getIndex(), ActionListener.wrap( + mappings -> finalListener.onResponse(config), finalListener::onFailure)), + finalListener::onFailure ); - // Get config - configProvider.get(request.getId(), configListener); + // Validate source and dest; check data extraction is possible + ActionListener getConfigListener = ActionListener.wrap( + config -> { + new SourceDestValidator(clusterService.state(), indexNameExpressionResolver).check(config); + DataFrameDataExtractorFactory.validateConfigAndSourceIndex(client, config, firstValidationListener); + }, + finalListener::onFailure + ); + + // First, get the config + configProvider.get(id, getConfigListener); } private void waitForAnalyticsStarted(PersistentTasksCustomMetaData.PersistentTask task, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndex.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndex.java index 11812cd4f5b..e07eb99a3f5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndex.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndex.java @@ -10,23 +10,35 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSortConfig; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import java.time.Clock; -import java.util.Arrays; +import java.util.Collections; import 
java.util.HashMap; -import java.util.List; +import java.util.Iterator; import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; +import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; + /** * {@link DataFrameAnalyticsIndex} class encapsulates logic for creating destination index based on source index metadata. */ @@ -36,57 +48,103 @@ final class DataFrameAnalyticsIndex { private static final String META = "_meta"; /** - * Unfortunately, getting the settings of an index include internal settings that should - * not be set explicitly. There is no way to filter those out. Thus, we have to maintain - * a list of them and filter them out manually. + * We only preserve the most important settings. + * If the user needs other settings on the destination index they + * should create the destination index before starting the analytics. */ - private static final List INTERNAL_SETTINGS = Arrays.asList( - "index.creation_date", - "index.provided_name", - "index.uuid", - "index.version.created", - "index.version.upgraded" - ); + private static final String[] PRESERVED_SETTINGS = new String[] {"index.number_of_shards", "index.number_of_replicas"}; + + private DataFrameAnalyticsIndex() {} /** * Creates destination index based on source index metadata. */ public static void createDestinationIndex(Client client, Clock clock, - ClusterState clusterState, DataFrameAnalyticsConfig analyticsConfig, ActionListener listener) { - String sourceIndex = analyticsConfig.getSource().getIndex(); - Map headers = analyticsConfig.getHeaders(); - IndexMetaData sourceIndexMetaData = clusterState.getMetaData().getIndices().get(sourceIndex); - if (sourceIndexMetaData == null) { - listener.onFailure(new IndexNotFoundException(sourceIndex)); - return; - } - CreateIndexRequest createIndexRequest = - prepareCreateIndexRequest(sourceIndexMetaData, analyticsConfig.getDest().getIndex(), analyticsConfig.getId(), clock); - ClientHelper.executeWithHeadersAsync( - headers, ClientHelper.ML_ORIGIN, client, CreateIndexAction.INSTANCE, createIndexRequest, listener); + ActionListener createIndexRequestListener = ActionListener.wrap( + createIndexRequest -> ClientHelper.executeWithHeadersAsync(analyticsConfig.getHeaders(), ClientHelper.ML_ORIGIN, client, + CreateIndexAction.INSTANCE, createIndexRequest, listener), + listener::onFailure + ); + + prepareCreateIndexRequest(client, clock, analyticsConfig, createIndexRequestListener); } - private static CreateIndexRequest prepareCreateIndexRequest(IndexMetaData sourceIndexMetaData, - String destinationIndex, - String analyticsId, - Clock clock) { - // Settings - Settings.Builder settingsBuilder = Settings.builder().put(sourceIndexMetaData.getSettings()); - INTERNAL_SETTINGS.forEach(settingsBuilder::remove); + private static void prepareCreateIndexRequest(Client client, Clock clock, DataFrameAnalyticsConfig config, + ActionListener listener) { + AtomicReference settingsHolder = new AtomicReference<>(); + + String[] sourceIndex = config.getSource().getIndex(); + + ActionListener> mappingsListener = ActionListener.wrap( + mappings -> listener.onResponse(createIndexRequest(clock, config, settingsHolder.get(), mappings)), + listener::onFailure + ); + + ActionListener settingsListener = ActionListener.wrap( + settings -> { + settingsHolder.set(settings); + MappingsMerger.mergeMappings(client, config.getHeaders(), sourceIndex, mappingsListener); + }, + listener::onFailure + ); + + ActionListener getSettingsResponseListener = 
ActionListener.wrap( + settingsResponse -> settingsListener.onResponse(settings(settingsResponse)), + listener::onFailure + ); + + GetSettingsRequest getSettingsRequest = new GetSettingsRequest(); + getSettingsRequest.indices(sourceIndex); + getSettingsRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); + getSettingsRequest.names(PRESERVED_SETTINGS); + ClientHelper.executeWithHeadersAsync(config.getHeaders(), ML_ORIGIN, client, GetSettingsAction.INSTANCE, + getSettingsRequest, getSettingsResponseListener); + } + + private static CreateIndexRequest createIndexRequest(Clock clock, DataFrameAnalyticsConfig config, Settings settings, + ImmutableOpenMap mappings) { + // There should only be 1 type + assert mappings.size() == 1; + + String destinationIndex = config.getDest().getIndex(); + String type = mappings.keysIt().next(); + Map mappingsAsMap = mappings.valuesIt().next().sourceAsMap(); + addProperties(mappingsAsMap); + addMetaData(mappingsAsMap, config.getId(), clock); + return new CreateIndexRequest(destinationIndex, settings).mapping(type, mappingsAsMap); + } + + private static Settings settings(GetSettingsResponse settingsResponse) { + Integer maxNumberOfShards = findMaxSettingValue(settingsResponse, IndexMetaData.SETTING_NUMBER_OF_SHARDS); + Integer maxNumberOfReplicas = findMaxSettingValue(settingsResponse, IndexMetaData.SETTING_NUMBER_OF_REPLICAS); + + Settings.Builder settingsBuilder = Settings.builder(); settingsBuilder.put(IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), DataFrameAnalyticsFields.ID); settingsBuilder.put(IndexSortConfig.INDEX_SORT_ORDER_SETTING.getKey(), SortOrder.ASC); - Settings settings = settingsBuilder.build(); + if (maxNumberOfShards != null) { + settingsBuilder.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, maxNumberOfShards); + } + if (maxNumberOfReplicas != null) { + settingsBuilder.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, maxNumberOfReplicas); + } + return settingsBuilder.build(); + } - // Mappings - String singleMappingType = sourceIndexMetaData.getMappings().keysIt().next(); - Map mappingsAsMap = sourceIndexMetaData.getMappings().valuesIt().next().sourceAsMap(); - addProperties(mappingsAsMap); - addMetaData(mappingsAsMap, analyticsId, clock); - - return new CreateIndexRequest(destinationIndex, settings).mapping(singleMappingType, mappingsAsMap); + @Nullable + private static Integer findMaxSettingValue(GetSettingsResponse settingsResponse, String settingKey) { + Integer maxValue = null; + Iterator settingsIterator = settingsResponse.getIndexToSettings().valuesIt(); + while (settingsIterator.hasNext()) { + Settings settings = settingsIterator.next(); + Integer indexValue = settings.getAsInt(settingKey, null); + if (indexValue != null) { + maxValue = maxValue == null ? 
indexValue : Math.max(indexValue, maxValue); + } + } + return maxValue; } private static void addProperties(Map mappingsAsMap) { @@ -115,6 +173,22 @@ final class DataFrameAnalyticsIndex { return value; } - private DataFrameAnalyticsIndex() {} + public static void updateMappingsToDestIndex(Client client, DataFrameAnalyticsConfig analyticsConfig, GetIndexResponse getIndexResponse, + ActionListener listener) { + // We have validated the destination index should match a single index + assert getIndexResponse.indices().length == 1; + + ImmutableOpenMap mappings = getIndexResponse.getMappings().get(getIndexResponse.indices()[0]); + String type = mappings.keysIt().next(); + + Map addedMappings = Collections.singletonMap(PROPERTIES, + Collections.singletonMap(DataFrameAnalyticsFields.ID, Collections.singletonMap("type", "keyword"))); + + PutMappingRequest putMappingRequest = new PutMappingRequest(getIndexResponse.indices()); + putMappingRequest.type(type); + putMappingRequest.source(addedMappings); + ClientHelper.executeWithHeadersAsync(analyticsConfig.getHeaders(), ML_ORIGIN, client, PutMappingAction.INSTANCE, + putMappingRequest, listener); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java index 764ca08d735..c7cfe2b6253 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java @@ -5,16 +5,20 @@ */ package org.elasticsearch.xpack.ml.dataframe; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.get.GetIndexAction; +import org.elasticsearch.action.admin.indices.get.GetIndexRequest; +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.reindex.BulkByScrollResponse; @@ -40,17 +44,17 @@ import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; public class DataFrameAnalyticsManager { - private final ClusterService clusterService; + private static final Logger LOGGER = LogManager.getLogger(DataFrameAnalyticsManager.class); + /** - * We need a {@link NodeClient} to be get the reindexing task and be able to report progress + * We need a {@link NodeClient} to get the reindexing task and be able to report progress */ private final NodeClient client; private final DataFrameAnalyticsConfigProvider configProvider; private final AnalyticsProcessManager processManager; - public DataFrameAnalyticsManager(ClusterService clusterService, NodeClient client, DataFrameAnalyticsConfigProvider 
configProvider, + public DataFrameAnalyticsManager(NodeClient client, DataFrameAnalyticsConfigProvider configProvider, AnalyticsProcessManager processManager) { - this.clusterService = Objects.requireNonNull(clusterService); this.client = Objects.requireNonNull(client); this.configProvider = Objects.requireNonNull(configProvider); this.processManager = Objects.requireNonNull(processManager); @@ -77,7 +81,6 @@ public class DataFrameAnalyticsManager { break; // The task has fully reindexed the documents and we should continue on with our analyses case ANALYZING: - // TODO apply previously stored model state if applicable startAnalytics(task, config, true); break; // If we are already at REINDEXING, we are not 100% sure if we reindexed ALL the docs. @@ -160,7 +163,27 @@ public class DataFrameAnalyticsManager { reindexCompletedListener::onFailure ); - DataFrameAnalyticsIndex.createDestinationIndex(client, Clock.systemUTC(), clusterService.state(), config, copyIndexCreatedListener); + // Create destination index if it does not exist + ActionListener destIndexListener = ActionListener.wrap( + indexResponse -> { + LOGGER.info("[{}] Using existing destination index [{}]", config.getId(), indexResponse.indices()[0]); + DataFrameAnalyticsIndex.updateMappingsToDestIndex(client, config, indexResponse, ActionListener.wrap( + acknowledgedResponse -> copyIndexCreatedListener.onResponse(null), + copyIndexCreatedListener::onFailure + )); + }, + e -> { + if (org.elasticsearch.ExceptionsHelper.unwrapCause(e) instanceof IndexNotFoundException) { + LOGGER.info("[{}] Creating destination index [{}]", config.getId(), config.getDest().getIndex()); + DataFrameAnalyticsIndex.createDestinationIndex(client, Clock.systemUTC(), config, copyIndexCreatedListener); + } else { + copyIndexCreatedListener.onFailure(e); + } + } + ); + + ClientHelper.executeWithHeadersAsync(config.getHeaders(), ML_ORIGIN, client, GetIndexAction.INSTANCE, + new GetIndexRequest().indices(config.getDest().getIndex()), destIndexListener); } private void startAnalytics(DataFrameAnalyticsTask task, DataFrameAnalyticsConfig config, boolean isTaskRestarting) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/MappingsMerger.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/MappingsMerger.java new file mode 100644 index 00000000000..f007831f7cf --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/MappingsMerger.java @@ -0,0 +1,100 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.ml.dataframe;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.xpack.core.ClientHelper;
+import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN;
+
+/**
+ * Merges mappings in a best effort and naive manner.
+ * The merge will fail if there is any conflict, i.e. the mappings of a field are not exactly the same.
+ */
+public final class MappingsMerger {
+
+    private MappingsMerger() {}
+
+    public static void mergeMappings(Client client, Map<String, String> headers, String[] index,
+                                     ActionListener<ImmutableOpenMap<String, MappingMetaData>> listener) {
+        ActionListener<GetMappingsResponse> mappingsListener = ActionListener.wrap(
+            getMappingsResponse -> listener.onResponse(MappingsMerger.mergeMappings(getMappingsResponse)),
+            listener::onFailure
+        );
+
+        GetMappingsRequest getMappingsRequest = new GetMappingsRequest();
+        getMappingsRequest.indices(index);
+        ClientHelper.executeWithHeadersAsync(headers, ML_ORIGIN, client, GetMappingsAction.INSTANCE, getMappingsRequest, mappingsListener);
+    }
+
+    static ImmutableOpenMap<String, MappingMetaData> mergeMappings(GetMappingsResponse getMappingsResponse) {
+        ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> indexToMappings = getMappingsResponse.getMappings();
+
+        String type = null;
+        Map<String, Object> mergedMappings = new HashMap<>();
+
+        Iterator<ObjectObjectCursor<String, ImmutableOpenMap<String, MappingMetaData>>> iterator = indexToMappings.iterator();
+        while (iterator.hasNext()) {
+            ObjectObjectCursor<String, ImmutableOpenMap<String, MappingMetaData>> indexMappings = iterator.next();
+            Iterator<ObjectObjectCursor<String, MappingMetaData>> typeIterator = indexMappings.value.iterator();
+            while (typeIterator.hasNext()) {
+                ObjectObjectCursor<String, MappingMetaData> typeMapping = typeIterator.next();
+                if (type == null) {
+                    type = typeMapping.key;
+                } else {
+                    if (type.equals(typeMapping.key) == false) {
+                        throw ExceptionsHelper.badRequestException("source indices contain mappings for different types: [{}, {}]",
+                            type, typeMapping.key);
+                    }
+                }
+                Map<String, Object> currentMappings = typeMapping.value.getSourceAsMap();
+                if (currentMappings.containsKey("properties")) {
+
+                    @SuppressWarnings("unchecked")
+                    Map<String, Object> fieldMappings = (Map<String, Object>) currentMappings.get("properties");
+
+                    for (Map.Entry<String, Object> fieldMapping : fieldMappings.entrySet()) {
+                        if (mergedMappings.containsKey(fieldMapping.getKey())) {
+                            if (mergedMappings.get(fieldMapping.getKey()).equals(fieldMapping.getValue()) == false) {
+                                throw ExceptionsHelper.badRequestException("cannot merge mappings because of differences for field [{}]",
+                                    fieldMapping.getKey());
+                            }
+                        } else {
+                            mergedMappings.put(fieldMapping.getKey(), fieldMapping.getValue());
+                        }
+                    }
+                }
+            }
+        }
+
+        MappingMetaData mappingMetaData = createMappingMetaData(type, mergedMappings);
+        ImmutableOpenMap.Builder<String, MappingMetaData> result = ImmutableOpenMap.builder();
+        result.put(type, mappingMetaData);
+        return result.build();
+    }
+
+    private static MappingMetaData createMappingMetaData(String type, Map<String, Object> mappings) {
+        try {
+            return new MappingMetaData(type, Collections.singletonMap("properties", mappings));
+        } catch (IOException e) {
+            throw ExceptionsHelper.serverError("Failed to
parse mappings: " + mappings); + } + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/SourceDestValidator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/SourceDestValidator.java index f607387e317..01803dc4359 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/SourceDestValidator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/SourceDestValidator.java @@ -29,10 +29,13 @@ public class SourceDestValidator { } public void check(DataFrameAnalyticsConfig config) { - String sourceIndex = config.getSource().getIndex(); + String[] sourceIndex = config.getSource().getIndex(); String destIndex = config.getDest().getIndex(); - String[] sourceExpressions = Strings.tokenizeToStringArray(sourceIndex, ","); + String[] sourceExpressions = Arrays.stream(sourceIndex) + .map(index -> Strings.tokenizeToStringArray(index, ",")) + .flatMap(Arrays::stream) + .toArray(String[]::new); for (String sourceExpression : sourceExpressions) { if (Regex.simpleMatch(sourceExpression, destIndex)) { @@ -45,7 +48,7 @@ public class SourceDestValidator { IndicesOptions.lenientExpandOpen(), sourceExpressions))); if (concreteSourceIndexNames.isEmpty()) { - throw ExceptionsHelper.badRequestException("No index matches source index [{}]", sourceIndex); + throw ExceptionsHelper.badRequestException("No index matches source index {}", Arrays.toString(sourceIndex)); } final String[] concreteDestIndexNames = indexNameExpressionResolver.concreteIndexNames(clusterState, @@ -59,7 +62,7 @@ public class SourceDestValidator { if (concreteDestIndexNames.length == 1 && concreteSourceIndexNames.contains(concreteDestIndexNames[0])) { // In case the dest index is an alias, we need to check the concrete index is not matched by source throw ExceptionsHelper.badRequestException("Destination index [{}], which is an alias for [{}], " + - "must not be included in source index [{}]", destIndex, concreteDestIndexNames[0], sourceIndex); + "must not be included in source index {}", destIndex, concreteDestIndexNames[0], Arrays.toString(sourceIndex)); } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorFactory.java index baf77c420c5..cacf00ad9e9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorFactory.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; @@ -73,7 +74,7 @@ public class DataFrameDataExtractorFactory { DataFrameAnalyticsConfig config, boolean isTaskRestarting, ActionListener listener) { - validateIndexAndExtractFields(client, config.getDest().getIndex(), config, isTaskRestarting, + validateIndexAndExtractFields(client, new String[] {config.getDest().getIndex()}, config, isTaskRestarting, 
ActionListener.wrap(extractedFields -> listener.onResponse(new DataFrameDataExtractorFactory( client, config.getId(), config.getDest().getIndex(), extractedFields, config.getHeaders())), listener::onFailure @@ -100,7 +101,7 @@ public class DataFrameDataExtractorFactory { } private static void validateIndexAndExtractFields(Client client, - String index, + String[] index, DataFrameAnalyticsConfig config, boolean isTaskRestarting, ActionListener listener) { @@ -120,6 +121,7 @@ public class DataFrameDataExtractorFactory { FieldCapabilitiesRequest fieldCapabilitiesRequest = new FieldCapabilitiesRequest(); fieldCapabilitiesRequest.indices(index); + fieldCapabilitiesRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); fieldCapabilitiesRequest.fields("*"); ClientHelper.executeWithHeaders(config.getHeaders(), ClientHelper.ML_ORIGIN, client, () -> { client.execute(FieldCapabilitiesAction.INSTANCE, fieldCapabilitiesRequest, fieldCapabilitiesHandler); @@ -134,7 +136,7 @@ public class DataFrameDataExtractorFactory { getDocValueFieldsLimit(client, index, docValueFieldsLimitListener); } - private static void getDocValueFieldsLimit(Client client, String index, ActionListener docValueFieldsLimitListener) { + private static void getDocValueFieldsLimit(Client client, String[] index, ActionListener docValueFieldsLimitListener) { ActionListener settingsListener = ActionListener.wrap(getSettingsResponse -> { Integer minDocValueFieldsLimit = Integer.MAX_VALUE; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetector.java index b36fc6f182a..d58eaebe353 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetector.java @@ -55,13 +55,13 @@ public class ExtractedFieldsDetector { COMPATIBLE_FIELD_TYPES = Collections.unmodifiableSet(compatibleTypes); } - private final String index; + private final String[] index; private final DataFrameAnalyticsConfig config; private final boolean isTaskRestarting; private final int docValueFieldsLimit; private final FieldCapabilitiesResponse fieldCapabilitiesResponse; - ExtractedFieldsDetector(String index, DataFrameAnalyticsConfig config, boolean isTaskRestarting, int docValueFieldsLimit, + ExtractedFieldsDetector(String[] index, DataFrameAnalyticsConfig config, boolean isTaskRestarting, int docValueFieldsLimit, FieldCapabilitiesResponse fieldCapabilitiesResponse) { this.index = Objects.requireNonNull(index); this.config = Objects.requireNonNull(config); @@ -74,7 +74,7 @@ public class ExtractedFieldsDetector { Set fields = new HashSet<>(fieldCapabilitiesResponse.get().keySet()); fields.removeAll(IGNORE_FIELDS); - checkResultsFieldIsNotPresent(fields, index); + checkResultsFieldIsNotPresent(); // Ignore fields under the results object fields.removeIf(field -> field.startsWith(config.getDest().getResultsField() + ".")); @@ -87,7 +87,7 @@ public class ExtractedFieldsDetector { ExtractedFields extractedFields = ExtractedFields.build(sortedFields, Collections.emptySet(), fieldCapabilitiesResponse) .filterFields(ExtractedField.ExtractionMethod.DOC_VALUE); if (extractedFields.getAllFields().isEmpty()) { - throw ExceptionsHelper.badRequestException("No compatible fields could be detected in index [{}]", index); + throw 
ExceptionsHelper.badRequestException("No compatible fields could be detected in index {}", Arrays.toString(index));
         }
         if (extractedFields.getDocValueFields().size() > docValueFieldsLimit) {
             extractedFields = fetchFromSourceIfSupported(extractedFields);
@@ -100,11 +100,16 @@
         return extractedFields;
     }
 
-    private void checkResultsFieldIsNotPresent(Set<String> fields, String index) {
+    private void checkResultsFieldIsNotPresent() {
         // If the task is restarting we do not mind the index containing the results field, we will overwrite all docs
-        if (isTaskRestarting == false && fields.contains(config.getDest().getResultsField())) {
-            throw ExceptionsHelper.badRequestException("Index [{}] already has a field that matches the {}.{} [{}];" +
-                " please set a different {}", index, DataFrameAnalyticsConfig.DEST.getPreferredName(),
+        if (isTaskRestarting) {
+            return;
+        }
+
+        Map<String, FieldCapabilities> indexToFieldCaps = fieldCapabilitiesResponse.getField(config.getDest().getResultsField());
+        if (indexToFieldCaps != null && indexToFieldCaps.isEmpty() == false) {
+            throw ExceptionsHelper.badRequestException("A field that matches the {}.{} [{}] already exists;" +
+                " please set a different {}", DataFrameAnalyticsConfig.DEST.getPreferredName(),
                 DataFrameAnalyticsDest.RESULTS_FIELD.getPreferredName(), config.getDest().getResultsField(),
                 DataFrameAnalyticsDest.RESULTS_FIELD.getPreferredName());
         }
@@ -121,7 +126,7 @@
         }
     }
 
-    private void includeAndExcludeFields(Set<String> fields, String index) {
+    private void includeAndExcludeFields(Set<String> fields, String[] index) {
         FetchSourceContext analyzedFields = config.getAnalyzedFields();
         if (analyzedFields == null) {
             return;
         }
@@ -136,12 +141,14 @@
 
         // If the inclusion set does not match anything, that means the user's desired fields cannot be found in
         // the collection of supported field types. We should let the user know.
Set includedSet = NameResolver.newUnaliased(fields, - (ex) -> new ResourceNotFoundException(Messages.getMessage(Messages.DATA_FRAME_ANALYTICS_BAD_FIELD_FILTER, index, ex))) + (ex) -> new ResourceNotFoundException( + Messages.getMessage(Messages.DATA_FRAME_ANALYTICS_BAD_FIELD_FILTER, ex))) .expand(includes, false); // If the exclusion set does not match anything, that means the fields are already not present // no need to raise if nothing matched Set excludedSet = NameResolver.newUnaliased(fields, - (ex) -> new ResourceNotFoundException(Messages.getMessage(Messages.DATA_FRAME_ANALYTICS_BAD_FIELD_FILTER, index, ex))) + (ex) -> new ResourceNotFoundException( + Messages.getMessage(Messages.DATA_FRAME_ANALYTICS_BAD_FIELD_FILTER, ex))) .expand(excludes, true); fields.retainAll(includedSet); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndexTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndexTests.java index 74d5526519d..80391de519e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndexTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndexTests.java @@ -10,17 +10,21 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; -import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; @@ -33,27 +37,25 @@ import java.io.IOException; import java.time.Clock; import java.time.Instant; import java.time.ZoneId; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import static org.elasticsearch.common.xcontent.support.XContentMapValues.extractValue; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; import static org.mockito.Matchers.any; import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.atLeastOnce; import 
static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.when; public class DataFrameAnalyticsIndexTests extends ESTestCase { - private static final String CLUSTER_NAME = "some-cluster-name"; - private static final String ANALYTICS_ID = "some-analytics-id"; - private static final String SOURCE_INDEX = "source-index"; + private static final String[] SOURCE_INDEX = new String[] {"source-index"}; private static final String DEST_INDEX = "dest-index"; private static final DataFrameAnalyticsConfig ANALYTICS_CONFIG = new DataFrameAnalyticsConfig.Builder(ANALYTICS_ID) @@ -71,6 +73,8 @@ public class DataFrameAnalyticsIndexTests extends ESTestCase { public void testCreateDestinationIndex() throws IOException { when(client.threadPool()).thenReturn(threadPool); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + + ArgumentCaptor createIndexRequestCaptor = ArgumentCaptor.forClass(CreateIndexRequest.class); doAnswer( invocationOnMock -> { @SuppressWarnings("unchecked") @@ -78,60 +82,102 @@ public class DataFrameAnalyticsIndexTests extends ESTestCase { listener.onResponse(null); return null; }) - .when(client).execute(any(), any(), any()); + .when(client).execute(eq(CreateIndexAction.INSTANCE), createIndexRequestCaptor.capture(), any()); + + Settings index1Settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + + Settings index2Settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + + ArgumentCaptor getSettingsRequestCaptor = ArgumentCaptor.forClass(GetSettingsRequest.class); + ArgumentCaptor getMappingsRequestCaptor = ArgumentCaptor.forClass(GetMappingsRequest.class); + + ImmutableOpenMap.Builder indexToSettings = ImmutableOpenMap.builder(); + indexToSettings.put("index_1", index1Settings); + indexToSettings.put("index_2", index2Settings); + + GetSettingsResponse getSettingsResponse = new GetSettingsResponse(indexToSettings.build(), ImmutableOpenMap.of()); + + doAnswer( + invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + listener.onResponse(getSettingsResponse); + return null; + } + ).when(client).execute(eq(GetSettingsAction.INSTANCE), getSettingsRequestCaptor.capture(), any()); + + Map index1Properties = new HashMap<>(); + index1Properties.put("field_1", "field_1_mappings"); + index1Properties.put("field_2", "field_2_mappings"); + Map index1Mappings = Collections.singletonMap("properties", index1Properties); + MappingMetaData index1MappingMetaData = new MappingMetaData("_doc", index1Mappings); + + Map index2Properties = new HashMap<>(); + index2Properties.put("field_1", "field_1_mappings"); + index2Properties.put("field_2", "field_2_mappings"); + Map index2Mappings = Collections.singletonMap("properties", index2Properties); + MappingMetaData index2MappingMetaData = new MappingMetaData("_doc", index2Mappings); + + ImmutableOpenMap.Builder index1MappingsMap = ImmutableOpenMap.builder(); + index1MappingsMap.put("_doc", index1MappingMetaData); + ImmutableOpenMap.Builder index2MappingsMap = ImmutableOpenMap.builder(); + 
index2MappingsMap.put("_doc", index2MappingMetaData); + + ImmutableOpenMap.Builder> mappings = ImmutableOpenMap.builder(); + mappings.put("index_1", index1MappingsMap.build()); + mappings.put("index_2", index2MappingsMap.build()); + + GetMappingsResponse getMappingsResponse = new GetMappingsResponse(mappings.build()); + + doAnswer( + invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + listener.onResponse(getMappingsResponse); + return null; + } + ).when(client).execute(eq(GetMappingsAction.INSTANCE), getMappingsRequestCaptor.capture(), any()); - Map propertiesMapping = new HashMap<>(); - propertiesMapping.put("properties", new HashMap<>()); - ClusterState clusterState = - ClusterState.builder(new ClusterName(CLUSTER_NAME)) - .metaData(MetaData.builder() - .put(IndexMetaData.builder(SOURCE_INDEX) - .settings(Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)) - .putMapping(new MappingMetaData("_doc", propertiesMapping)))) - .build(); DataFrameAnalyticsIndex.createDestinationIndex( client, clock, - clusterState, ANALYTICS_CONFIG, ActionListener.wrap( response -> {}, e -> fail(e.getMessage()))); - ArgumentCaptor createIndexRequestCaptor = ArgumentCaptor.forClass(CreateIndexRequest.class); - verify(client, atLeastOnce()).threadPool(); - verify(client).execute(eq(CreateIndexAction.INSTANCE), createIndexRequestCaptor.capture(), any()); - verifyNoMoreInteractions(client); + GetSettingsRequest capturedGetSettingsRequest = getSettingsRequestCaptor.getValue(); + assertThat(capturedGetSettingsRequest.indices(), equalTo(SOURCE_INDEX)); + assertThat(capturedGetSettingsRequest.indicesOptions(), equalTo(IndicesOptions.lenientExpandOpen())); + assertThat(Arrays.asList(capturedGetSettingsRequest.names()), contains("index.number_of_shards", "index.number_of_replicas")); + + assertThat(getMappingsRequestCaptor.getValue().indices(), equalTo(SOURCE_INDEX)); CreateIndexRequest createIndexRequest = createIndexRequestCaptor.getValue(); + + assertThat(createIndexRequest.settings().keySet(), + containsInAnyOrder("index.number_of_shards", "index.number_of_replicas", "index.sort.field", "index.sort.order")); + assertThat(createIndexRequest.settings().getAsInt("index.number_of_shards", -1), equalTo(5)); + assertThat(createIndexRequest.settings().getAsInt("index.number_of_replicas", -1), equalTo(1)); + assertThat(createIndexRequest.settings().get("index.sort.field"), equalTo("_id_copy")); + assertThat(createIndexRequest.settings().get("index.sort.order"), equalTo("asc")); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, createIndexRequest.mappings().get("_doc"))) { Map map = parser.map(); assertThat(extractValue("_doc.properties._id_copy.type", map), equalTo("keyword")); + assertThat(extractValue("_doc.properties.field_1", map), equalTo("field_1_mappings")); + assertThat(extractValue("_doc.properties.field_2", map), equalTo("field_2_mappings")); assertThat(extractValue("_doc._meta.analytics", map), equalTo(ANALYTICS_ID)); assertThat(extractValue("_doc._meta.creation_date_in_millis", map), equalTo(CURRENT_TIME_MILLIS)); assertThat(extractValue("_doc._meta.created_by", map), equalTo(CREATED_BY)); } } - - public void testCreateDestinationIndex_IndexNotFound() { - ClusterState clusterState = - ClusterState.builder(new ClusterName(CLUSTER_NAME)) - .metaData(MetaData.builder()) - 
.build(); - DataFrameAnalyticsIndex.createDestinationIndex( - client, - clock, - clusterState, - ANALYTICS_CONFIG, - ActionListener.wrap( - response -> fail("IndexNotFoundException should be thrown"), - e -> { - assertThat(e, instanceOf(IndexNotFoundException.class)); - IndexNotFoundException infe = (IndexNotFoundException) e; - assertThat(infe.getIndex().getName(), equalTo(SOURCE_INDEX)); - })); - } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/MappingsMergerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/MappingsMergerTests.java new file mode 100644 index 00000000000..5c7b08ba1c7 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/MappingsMergerTests.java @@ -0,0 +1,153 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.dataframe; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class MappingsMergerTests extends ESTestCase { + + public void testMergeMappings_GivenIndicesWithIdenticalMappings() throws IOException { + Map index1Properties = new HashMap<>(); + index1Properties.put("field_1", "field_1_mappings"); + index1Properties.put("field_2", "field_2_mappings"); + Map index1Mappings = Collections.singletonMap("properties", index1Properties); + MappingMetaData index1MappingMetaData = new MappingMetaData("_doc", index1Mappings); + + Map index2Properties = new HashMap<>(); + index2Properties.put("field_1", "field_1_mappings"); + index2Properties.put("field_2", "field_2_mappings"); + Map index2Mappings = Collections.singletonMap("properties", index2Properties); + MappingMetaData index2MappingMetaData = new MappingMetaData("_doc", index2Mappings); + + ImmutableOpenMap.Builder index1MappingsMap = ImmutableOpenMap.builder(); + index1MappingsMap.put("_doc", index1MappingMetaData); + ImmutableOpenMap.Builder index2MappingsMap = ImmutableOpenMap.builder(); + index2MappingsMap.put("_doc", index2MappingMetaData); + + ImmutableOpenMap.Builder> mappings = ImmutableOpenMap.builder(); + mappings.put("index_1", index1MappingsMap.build()); + mappings.put("index_2", index2MappingsMap.build()); + + GetMappingsResponse getMappingsResponse = new GetMappingsResponse(mappings.build()); + + ImmutableOpenMap mergedMappings = MappingsMerger.mergeMappings(getMappingsResponse); + + assertThat(mergedMappings.size(), equalTo(1)); + assertThat(mergedMappings.containsKey("_doc"), is(true)); + assertThat(mergedMappings.valuesIt().next().getSourceAsMap(), equalTo(index1Mappings)); + } + + public void testMergeMappings_GivenIndicesWithDifferentTypes() throws IOException { + Map index1Mappings = Collections.singletonMap("properties", + Collections.singletonMap("field_1", 
"field_1_mappings")); + MappingMetaData index1MappingMetaData = new MappingMetaData("type_1", index1Mappings); + + Map index2Mappings = Collections.singletonMap("properties", + Collections.singletonMap("field_1", "field_1_mappings")); + MappingMetaData index2MappingMetaData = new MappingMetaData("type_2", index2Mappings); + + ImmutableOpenMap.Builder index1MappingsMap = ImmutableOpenMap.builder(); + index1MappingsMap.put("type_1", index1MappingMetaData); + ImmutableOpenMap.Builder index2MappingsMap = ImmutableOpenMap.builder(); + index2MappingsMap.put("type_2", index2MappingMetaData); + + ImmutableOpenMap.Builder> mappings = ImmutableOpenMap.builder(); + mappings.put("index_1", index1MappingsMap.build()); + mappings.put("index_2", index2MappingsMap.build()); + + GetMappingsResponse getMappingsResponse = new GetMappingsResponse(mappings.build()); + + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> MappingsMerger.mergeMappings(getMappingsResponse)); + assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(e.getMessage(), containsString("source indices contain mappings for different types:")); + assertThat(e.getMessage(), containsString("type_1")); + assertThat(e.getMessage(), containsString("type_2")); + } + + public void testMergeMappings_GivenFieldWithDifferentMapping() throws IOException { + Map index1Mappings = Collections.singletonMap("properties", + Collections.singletonMap("field_1", "field_1_mappings")); + MappingMetaData index1MappingMetaData = new MappingMetaData("_doc", index1Mappings); + + Map index2Mappings = Collections.singletonMap("properties", + Collections.singletonMap("field_1", "different_field_1_mappings")); + MappingMetaData index2MappingMetaData = new MappingMetaData("_doc", index2Mappings); + + ImmutableOpenMap.Builder index1MappingsMap = ImmutableOpenMap.builder(); + index1MappingsMap.put("_doc", index1MappingMetaData); + ImmutableOpenMap.Builder index2MappingsMap = ImmutableOpenMap.builder(); + index2MappingsMap.put("_doc", index2MappingMetaData); + + ImmutableOpenMap.Builder> mappings = ImmutableOpenMap.builder(); + mappings.put("index_1", index1MappingsMap.build()); + mappings.put("index_2", index2MappingsMap.build()); + + GetMappingsResponse getMappingsResponse = new GetMappingsResponse(mappings.build()); + + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> MappingsMerger.mergeMappings(getMappingsResponse)); + assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(e.getMessage(), equalTo("cannot merge mappings because of differences for field [field_1]")); + } + + public void testMergeMappings_GivenIndicesWithDifferentMappingsButNoConflicts() throws IOException { + Map index1Properties = new HashMap<>(); + index1Properties.put("field_1", "field_1_mappings"); + index1Properties.put("field_2", "field_2_mappings"); + Map index1Mappings = Collections.singletonMap("properties", index1Properties); + MappingMetaData index1MappingMetaData = new MappingMetaData("_doc", index1Mappings); + + Map index2Properties = new HashMap<>(); + index2Properties.put("field_1", "field_1_mappings"); + index2Properties.put("field_3", "field_3_mappings"); + Map index2Mappings = Collections.singletonMap("properties", index2Properties); + MappingMetaData index2MappingMetaData = new MappingMetaData("_doc", index2Mappings); + + ImmutableOpenMap.Builder index1MappingsMap = ImmutableOpenMap.builder(); + index1MappingsMap.put("_doc", index1MappingMetaData); + ImmutableOpenMap.Builder 
index2MappingsMap = ImmutableOpenMap.builder(); + index2MappingsMap.put("_doc", index2MappingMetaData); + + ImmutableOpenMap.Builder> mappings = ImmutableOpenMap.builder(); + mappings.put("index_1", index1MappingsMap.build()); + mappings.put("index_2", index2MappingsMap.build()); + + GetMappingsResponse getMappingsResponse = new GetMappingsResponse(mappings.build()); + + ImmutableOpenMap mergedMappings = MappingsMerger.mergeMappings(getMappingsResponse); + + assertThat(mergedMappings.size(), equalTo(1)); + assertThat(mergedMappings.containsKey("_doc"), is(true)); + Map mappingsAsMap = mergedMappings.valuesIt().next().getSourceAsMap(); + assertThat(mappingsAsMap.size(), equalTo(1)); + assertThat(mappingsAsMap.containsKey("properties"), is(true)); + + @SuppressWarnings("unchecked") + Map fieldMappings = (Map) mappingsAsMap.get("properties"); + + assertThat(fieldMappings.size(), equalTo(3)); + assertThat(fieldMappings.keySet(), containsInAnyOrder("field_1", "field_2", "field_3")); + assertThat(fieldMappings.get("field_1"), equalTo("field_1_mappings")); + assertThat(fieldMappings.get("field_2"), equalTo("field_2_mappings")); + assertThat(fieldMappings.get("field_3"), equalTo("field_3_mappings")); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/SourceDestValidatorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/SourceDestValidatorTests.java index fb91673b7a5..d48d079314a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/SourceDestValidatorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/SourceDestValidatorTests.java @@ -64,7 +64,7 @@ public class SourceDestValidatorTests extends ESTestCase { public void testCheck_GivenSimpleSourceIndexAndValidDestIndex() { DataFrameAnalyticsConfig config = new DataFrameAnalyticsConfig.Builder("test") - .setSource(new DataFrameAnalyticsSource("source-1", null)) + .setSource(createSource("source-1")) .setDest(new DataFrameAnalyticsDest("dest", null)) .setAnalysis(new OutlierDetection()) .build(); @@ -75,7 +75,7 @@ public class SourceDestValidatorTests extends ESTestCase { public void testCheck_GivenMissingConcreteSourceIndex() { DataFrameAnalyticsConfig config = new DataFrameAnalyticsConfig.Builder("test") - .setSource(new DataFrameAnalyticsSource("missing", null)) + .setSource(createSource("missing")) .setDest(new DataFrameAnalyticsDest("dest", null)) .setAnalysis(new OutlierDetection()) .build(); @@ -89,7 +89,7 @@ public class SourceDestValidatorTests extends ESTestCase { public void testCheck_GivenMissingWildcardSourceIndex() { DataFrameAnalyticsConfig config = new DataFrameAnalyticsConfig.Builder("test") - .setSource(new DataFrameAnalyticsSource("missing*", null)) + .setSource(createSource("missing*")) .setDest(new DataFrameAnalyticsDest("dest", null)) .setAnalysis(new OutlierDetection()) .build(); @@ -103,7 +103,7 @@ public class SourceDestValidatorTests extends ESTestCase { public void testCheck_GivenDestIndexSameAsSourceIndex() { DataFrameAnalyticsConfig config = new DataFrameAnalyticsConfig.Builder("test") - .setSource(new DataFrameAnalyticsSource("source-1", null)) + .setSource(createSource("source-1")) .setDest(new DataFrameAnalyticsDest("source-1", null)) .setAnalysis(new OutlierDetection()) .build(); @@ -117,7 +117,7 @@ public class SourceDestValidatorTests extends ESTestCase { public void testCheck_GivenDestIndexMatchesSourceIndex() { DataFrameAnalyticsConfig config = new DataFrameAnalyticsConfig.Builder("test") - 
.setSource(new DataFrameAnalyticsSource("source-*", null)) + .setSource(createSource("source-*")) .setDest(new DataFrameAnalyticsDest(SOURCE_2, null)) .setAnalysis(new OutlierDetection()) .build(); @@ -131,7 +131,7 @@ public class SourceDestValidatorTests extends ESTestCase { public void testCheck_GivenDestIndexMatchesOneOfSourceIndices() { DataFrameAnalyticsConfig config = new DataFrameAnalyticsConfig.Builder("test") - .setSource(new DataFrameAnalyticsSource("source-1,source-*", null)) + .setSource(createSource("source-1,source-*")) .setDest(new DataFrameAnalyticsDest(SOURCE_2, null)) .setAnalysis(new OutlierDetection()) .build(); @@ -145,7 +145,7 @@ public class SourceDestValidatorTests extends ESTestCase { public void testCheck_GivenDestIndexIsAliasThatMatchesMultipleIndices() { DataFrameAnalyticsConfig config = new DataFrameAnalyticsConfig.Builder("test") - .setSource(new DataFrameAnalyticsSource(SOURCE_1, null)) + .setSource(createSource(SOURCE_1)) .setDest(new DataFrameAnalyticsDest("dest-alias", null)) .setAnalysis(new OutlierDetection()) .build(); @@ -160,7 +160,7 @@ public class SourceDestValidatorTests extends ESTestCase { public void testCheck_GivenDestIndexIsAliasThatIsIncludedInSource() { DataFrameAnalyticsConfig config = new DataFrameAnalyticsConfig.Builder("test") - .setSource(new DataFrameAnalyticsSource("source-1", null)) + .setSource(createSource("source-1")) .setDest(new DataFrameAnalyticsDest("source-1-alias", null)) .setAnalysis(new OutlierDetection()) .build(); @@ -173,4 +173,8 @@ public class SourceDestValidatorTests extends ESTestCase { equalTo("Destination index [source-1-alias], which is an alias for [source-1], " + "must not be included in source index [source-1]")); } + + private static DataFrameAnalyticsSource createSource(String... 
index) { + return new DataFrameAnalyticsSource(index, null); + } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorTests.java index c035c44f117..1345a1fe128 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorTests.java @@ -33,7 +33,7 @@ import static org.mockito.Mockito.when; public class ExtractedFieldsDetectorTests extends ESTestCase { - private static final String SOURCE_INDEX = "source_index"; + private static final String[] SOURCE_INDEX = new String[] { "source_index" }; private static final String DEST_INDEX = "dest_index"; private static final String RESULTS_FIELD = "ml"; @@ -154,7 +154,7 @@ public class ExtractedFieldsDetectorTests extends ESTestCase { SOURCE_INDEX, buildAnalyticsConfig(desiredFields), false, 100, fieldCapabilities); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> extractedFieldsDetector.detect()); - assertThat(e.getMessage(), equalTo("No compatible fields could be detected in index [source_index] with name [your_field1]")); + assertThat(e.getMessage(), equalTo("No field [your_field1] could be detected")); } public void testDetectedExtractedFields_GivenExcludeAllValidFields() { @@ -202,7 +202,7 @@ public class ExtractedFieldsDetectorTests extends ESTestCase { SOURCE_INDEX, buildAnalyticsConfig(), false, 100, fieldCapabilities); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> extractedFieldsDetector.detect()); - assertThat(e.getMessage(), equalTo("Index [source_index] already has a field that matches the dest.results_field [ml]; " + + assertThat(e.getMessage(), equalTo("A field that matches the dest.results_field [ml] already exists; " + "please set a different results_field")); } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_crud.yml index 01afb7714f3..168173e648e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_crud.yml @@ -50,7 +50,7 @@ setup: "analyzed_fields": [ "obj1.*", "obj2.*" ] } - match: { id: "simple-outlier-detection-with-query" } - - match: { source.index: "index-source" } + - match: { source.index: ["index-source"] } - match: { source.query: {"term" : { "user" : "Kimchy"} } } - match: { dest.index: "index-dest" } - match: { analysis: {"outlier_detection":{}} } @@ -63,7 +63,7 @@ setup: id: "simple-outlier-detection-with-query" - match: { count: 1 } - match: { data_frame_analytics.0.id: "simple-outlier-detection-with-query" } - - match: { data_frame_analytics.0.source.index: "index-source" } + - match: { data_frame_analytics.0.source.index: ["index-source"] } - match: { data_frame_analytics.0.source.query: {"term" : { "user" : "Kimchy"} } } - match: { data_frame_analytics.0.dest.index: "index-dest" } - match: { data_frame_analytics.0.analysis: {"outlier_detection":{}} } @@ -145,7 +145,7 @@ setup: "analysis": {"outlier_detection":{}} } - match: { id: "simple-outlier-detection" } - - match: { source.index: "index-source" } + - match: { source.index: ["index-source"] 
}
   - match: { source.query: {"match_all" : {} } }
   - match: { dest.index: "index-dest" }
   - match: { analysis: {"outlier_detection":{}} }
@@ -175,7 +175,7 @@ setup:
           }
         }
   - match: { id: "custom-outlier-detection" }
-  - match: { source.index: "index-source" }
+  - match: { source.index: ["index-source"] }
   - match: { source.query: {"match_all" : {} } }
   - match: { dest.index: "index-dest" }
   - match: { analysis.outlier_detection.n_neighbors: 5 }
@@ -427,16 +427,34 @@ setup:
           }
 
 ---
-"Test put config given source with empty index":
+"Test put config given source with empty index array":
 
   - do:
-      catch: /\[index\] must be non-empty/
+      catch: /source\.index must specify at least one index/
       ml.put_data_frame_analytics:
         id: "simple-outlier-detection"
         body: >
           {
             "source": {
-              "index": ""
+              "index": []
             },
             "dest": {
               "index": "index-dest"
             },
             "analysis": {"outlier_detection":{}}
           }
+
+---
+"Test put config given source with empty string in index array":
+
+  - do:
+      catch: /source\.index must contain non-null and non-empty strings/
+      ml.put_data_frame_analytics:
+        id: "simple-outlier-detection"
+        body: >
+          {
+            "source": {
+              "index": [""]
             },
             "dest": {
               "index": "index-dest"
@@ -889,7 +907,7 @@ setup:
         "analyzed_fields": [ "obj1.*", "obj2.*" ]
       }
   - match: { id: "simple-outlier-detection-with-query" }
-  - match: { source.index: "index-source" }
+  - match: { source.index: ["index-source"] }
   - match: { source.query: {"term" : { "user" : "Kimchy"} } }
   - match: { dest.index: "index-dest" }
   - match: { analysis: {"outlier_detection":{}} }

From 86c853a7c2277c513227de3babfb03a13b2956c4 Mon Sep 17 00:00:00 2001
From: Dimitris Athanasiou
Date: Fri, 28 Jun 2019 13:28:25 +0300
Subject: [PATCH 24/42] [7.x][ML] Rename outlier score setting to feature_influence_threshold (#43705) (#43734)

Renames outlier score setting `minimum_score_to_write_feature_influence`
to `feature_influence_threshold`.
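For context, a minimal request exercising the renamed setting would look like the sketch below. The endpoint path and index names here are illustrative assumptions, not part of this patch; per the javadoc in the diff, the threshold still defaults to 0.1 when unset. Note that `source.index` takes an array after the earlier change in this series.

PUT _ml/data_frame/analytics/example-config
{
  "source": { "index": ["index-source"] },
  "dest": { "index": "index-dest" },
  "analysis": {
    "outlier_detection": {
      "n_neighbors": 5,
      "feature_influence_threshold": 0.2
    }
  }
}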
--- .../client/ml/dataframe/OutlierDetection.java | 33 +++++++++--------- .../ml/dataframe/OutlierDetectionTests.java | 8 ++--- .../dataframe/analyses/OutlierDetection.java | 34 +++++++++---------- .../persistence/ElasticsearchMappings.java | 2 +- .../ml/job/results/ReservedFieldNames.java | 2 +- .../analyses/OutlierDetectionTests.java | 2 +- .../test/ml/data_frame_analytics_crud.yml | 4 +-- 7 files changed, 41 insertions(+), 44 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/OutlierDetection.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/OutlierDetection.java index 946c01ac5c8..fe5094fb719 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/OutlierDetection.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/OutlierDetection.java @@ -47,8 +47,7 @@ public class OutlierDetection implements DataFrameAnalysis { public static final ParseField NAME = new ParseField("outlier_detection"); static final ParseField N_NEIGHBORS = new ParseField("n_neighbors"); static final ParseField METHOD = new ParseField("method"); - public static final ParseField MINIMUM_SCORE_TO_WRITE_FEATURE_INFLUENCE = - new ParseField("minimum_score_to_write_feature_influence"); + public static final ParseField FEATURE_INFLUENCE_THRESHOLD = new ParseField("feature_influence_threshold"); private static ObjectParser PARSER = new ObjectParser<>(NAME.getPreferredName(), true, Builder::new); @@ -60,23 +59,23 @@ public class OutlierDetection implements DataFrameAnalysis { } throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); }, METHOD, ObjectParser.ValueType.STRING); - PARSER.declareDouble(Builder::setMinScoreToWriteFeatureInfluence, MINIMUM_SCORE_TO_WRITE_FEATURE_INFLUENCE); + PARSER.declareDouble(Builder::setFeatureInfluenceThreshold, FEATURE_INFLUENCE_THRESHOLD); } private final Integer nNeighbors; private final Method method; - private final Double minScoreToWriteFeatureInfluence; + private final Double featureInfluenceThreshold; /** * Constructs the outlier detection configuration * @param nNeighbors The number of neighbors. Leave unspecified for dynamic detection. * @param method The method. Leave unspecified for a dynamic mixture of methods. - * @param minScoreToWriteFeatureInfluence The min outlier score required to calculate feature influence. Defaults to 0.1. + * @param featureInfluenceThreshold The min outlier score required to calculate feature influence. Defaults to 0.1. 
*/ - private OutlierDetection(@Nullable Integer nNeighbors, @Nullable Method method, @Nullable Double minScoreToWriteFeatureInfluence) { + private OutlierDetection(@Nullable Integer nNeighbors, @Nullable Method method, @Nullable Double featureInfluenceThreshold) { this.nNeighbors = nNeighbors; this.method = method; - this.minScoreToWriteFeatureInfluence = minScoreToWriteFeatureInfluence; + this.featureInfluenceThreshold = featureInfluenceThreshold; } @Override @@ -92,8 +91,8 @@ public class OutlierDetection implements DataFrameAnalysis { return method; } - public Double getMinScoreToWriteFeatureInfluence() { - return minScoreToWriteFeatureInfluence; + public Double getFeatureInfluenceThreshold() { + return featureInfluenceThreshold; } @Override @@ -105,8 +104,8 @@ public class OutlierDetection implements DataFrameAnalysis { if (method != null) { builder.field(METHOD.getPreferredName(), method); } - if (minScoreToWriteFeatureInfluence != null) { - builder.field(MINIMUM_SCORE_TO_WRITE_FEATURE_INFLUENCE.getPreferredName(), minScoreToWriteFeatureInfluence); + if (featureInfluenceThreshold != null) { + builder.field(FEATURE_INFLUENCE_THRESHOLD.getPreferredName(), featureInfluenceThreshold); } builder.endObject(); return builder; @@ -120,12 +119,12 @@ public class OutlierDetection implements DataFrameAnalysis { OutlierDetection other = (OutlierDetection) o; return Objects.equals(nNeighbors, other.nNeighbors) && Objects.equals(method, other.method) - && Objects.equals(minScoreToWriteFeatureInfluence, other.minScoreToWriteFeatureInfluence); + && Objects.equals(featureInfluenceThreshold, other.featureInfluenceThreshold); } @Override public int hashCode() { - return Objects.hash(nNeighbors, method, minScoreToWriteFeatureInfluence); + return Objects.hash(nNeighbors, method, featureInfluenceThreshold); } @Override @@ -150,7 +149,7 @@ public class OutlierDetection implements DataFrameAnalysis { private Integer nNeighbors; private Method method; - private Double minScoreToWriteFeatureInfluence; + private Double featureInfluenceThreshold; private Builder() {} @@ -164,13 +163,13 @@ public class OutlierDetection implements DataFrameAnalysis { return this; } - public Builder setMinScoreToWriteFeatureInfluence(Double minScoreToWriteFeatureInfluence) { - this.minScoreToWriteFeatureInfluence = minScoreToWriteFeatureInfluence; + public Builder setFeatureInfluenceThreshold(Double featureInfluenceThreshold) { + this.featureInfluenceThreshold = featureInfluenceThreshold; return this; } public OutlierDetection build() { - return new OutlierDetection(nNeighbors, method, minScoreToWriteFeatureInfluence); + return new OutlierDetection(nNeighbors, method, featureInfluenceThreshold); } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/OutlierDetectionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/OutlierDetectionTests.java index de110d92fde..7307999a2bf 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/OutlierDetectionTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/OutlierDetectionTests.java @@ -33,7 +33,7 @@ public class OutlierDetectionTests extends AbstractXContentTestCase LENIENT_PARSER = createParser(true); private static final ConstructingObjectParser STRICT_PARSER = createParser(false); @@ -43,7 +42,7 @@ public class OutlierDetection implements DataFrameAnalysis { } throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); }, 
METHOD, ObjectParser.ValueType.STRING); - parser.declareDouble(ConstructingObjectParser.optionalConstructorArg(), MINIMUM_SCORE_TO_WRITE_FEATURE_INFLUENCE); + parser.declareDouble(ConstructingObjectParser.optionalConstructorArg(), FEATURE_INFLUENCE_THRESHOLD); return parser; } @@ -53,27 +52,26 @@ public class OutlierDetection implements DataFrameAnalysis { private final Integer nNeighbors; private final Method method; - private final Double minScoreToWriteFeatureInfluence; + private final Double featureInfluenceThreshold; /** * Constructs the outlier detection configuration * @param nNeighbors The number of neighbors. Leave unspecified for dynamic detection. * @param method The method. Leave unspecified for a dynamic mixture of methods. - * @param minScoreToWriteFeatureInfluence The min outlier score required to calculate feature influence. Defaults to 0.1. + * @param featureInfluenceThreshold The min outlier score required to calculate feature influence. Defaults to 0.1. */ - public OutlierDetection(@Nullable Integer nNeighbors, @Nullable Method method, @Nullable Double minScoreToWriteFeatureInfluence) { + public OutlierDetection(@Nullable Integer nNeighbors, @Nullable Method method, @Nullable Double featureInfluenceThreshold) { if (nNeighbors != null && nNeighbors <= 0) { throw ExceptionsHelper.badRequestException("[{}] must be a positive integer", N_NEIGHBORS.getPreferredName()); } - if (minScoreToWriteFeatureInfluence != null && (minScoreToWriteFeatureInfluence < 0.0 || minScoreToWriteFeatureInfluence > 1.0)) { - throw ExceptionsHelper.badRequestException("[{}] must be in [0, 1]", - MINIMUM_SCORE_TO_WRITE_FEATURE_INFLUENCE.getPreferredName()); + if (featureInfluenceThreshold != null && (featureInfluenceThreshold < 0.0 || featureInfluenceThreshold > 1.0)) { + throw ExceptionsHelper.badRequestException("[{}] must be in [0, 1]", FEATURE_INFLUENCE_THRESHOLD.getPreferredName()); } this.nNeighbors = nNeighbors; this.method = method; - this.minScoreToWriteFeatureInfluence = minScoreToWriteFeatureInfluence; + this.featureInfluenceThreshold = featureInfluenceThreshold; } /** @@ -86,7 +84,7 @@ public class OutlierDetection implements DataFrameAnalysis { public OutlierDetection(StreamInput in) throws IOException { nNeighbors = in.readOptionalVInt(); method = in.readBoolean() ? 
in.readEnum(Method.class) : null; - minScoreToWriteFeatureInfluence = in.readOptionalDouble(); + featureInfluenceThreshold = in.readOptionalDouble(); } @Override @@ -105,7 +103,7 @@ public class OutlierDetection implements DataFrameAnalysis { out.writeBoolean(false); } - out.writeOptionalDouble(minScoreToWriteFeatureInfluence); + out.writeOptionalDouble(featureInfluenceThreshold); } @Override @@ -117,8 +115,8 @@ public class OutlierDetection implements DataFrameAnalysis { if (method != null) { builder.field(METHOD.getPreferredName(), method); } - if (minScoreToWriteFeatureInfluence != null) { - builder.field(MINIMUM_SCORE_TO_WRITE_FEATURE_INFLUENCE.getPreferredName(), minScoreToWriteFeatureInfluence); + if (featureInfluenceThreshold != null) { + builder.field(FEATURE_INFLUENCE_THRESHOLD.getPreferredName(), featureInfluenceThreshold); } builder.endObject(); return builder; @@ -131,12 +129,12 @@ public class OutlierDetection implements DataFrameAnalysis { OutlierDetection that = (OutlierDetection) o; return Objects.equals(nNeighbors, that.nNeighbors) && Objects.equals(method, that.method) - && Objects.equals(minScoreToWriteFeatureInfluence, that.minScoreToWriteFeatureInfluence); + && Objects.equals(featureInfluenceThreshold, that.featureInfluenceThreshold); } @Override public int hashCode() { - return Objects.hash(nNeighbors, method, minScoreToWriteFeatureInfluence); + return Objects.hash(nNeighbors, method, featureInfluenceThreshold); } @Override @@ -148,8 +146,8 @@ public class OutlierDetection implements DataFrameAnalysis { if (method != null) { params.put(METHOD.getPreferredName(), method); } - if (minScoreToWriteFeatureInfluence != null) { - params.put(MINIMUM_SCORE_TO_WRITE_FEATURE_INFLUENCE.getPreferredName(), minScoreToWriteFeatureInfluence); + if (featureInfluenceThreshold != null) { + params.put(FEATURE_INFLUENCE_THRESHOLD.getPreferredName(), featureInfluenceThreshold); } return params; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java index 75ce2d53315..0fc7770758a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java @@ -432,7 +432,7 @@ public class ElasticsearchMappings { .startObject(OutlierDetection.METHOD.getPreferredName()) .field(TYPE, KEYWORD) .endObject() - .startObject(OutlierDetection.MINIMUM_SCORE_TO_WRITE_FEATURE_INFLUENCE.getPreferredName()) + .startObject(OutlierDetection.FEATURE_INFLUENCE_THRESHOLD.getPreferredName()) .field(TYPE, DOUBLE) .endObject() .endObject() diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java index eff33a37d97..2b3497c0aff 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java @@ -286,7 +286,7 @@ public final class ReservedFieldNames { OutlierDetection.NAME.getPreferredName(), OutlierDetection.N_NEIGHBORS.getPreferredName(), OutlierDetection.METHOD.getPreferredName(), - OutlierDetection.MINIMUM_SCORE_TO_WRITE_FEATURE_INFLUENCE.getPreferredName(), + 
OutlierDetection.FEATURE_INFLUENCE_THRESHOLD.getPreferredName(), ElasticsearchMappings.CONFIG_TYPE, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetectionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetectionTests.java index d7a32695971..db6ed7d34aa 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetectionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetectionTests.java @@ -53,7 +53,7 @@ public class OutlierDetectionTests extends AbstractSerializingTestCase Date: Fri, 28 Jun 2019 12:58:22 +0200 Subject: [PATCH 25/42] Enabled cannot be updated (#43701) Removed the invalid tip that enabled can be updated for existing fields and clarified instead that it cannot. Related to #33566 and #33933 --- docs/reference/mapping/params/enabled.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/mapping/params/enabled.asciidoc b/docs/reference/mapping/params/enabled.asciidoc index 7193c6aa9f6..edbfb1f77d7 100644 --- a/docs/reference/mapping/params/enabled.asciidoc +++ b/docs/reference/mapping/params/enabled.asciidoc @@ -89,8 +89,8 @@ GET my_index/_mapping <3> <2> The document can be retrieved. <3> Checking the mapping reveals that no fields have been added. -TIP: The `enabled` setting can be updated on existing fields -using the <>. +The `enabled` setting for existing fields and the top-level mapping +definition cannot be updated. Note that because Elasticsearch completely skips parsing the field contents, it is possible to add non-object data to a disabled field: From 74dd6e49fcd9ee83e7ab3b60b68db4aea65737a0 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Fri, 28 Jun 2019 08:35:20 -0400 Subject: [PATCH 26/42] [DOCS] Rewrite boosting query (#43647) --- .../query-dsl/boosting-query.asciidoc | 52 ++++++++++++++----- 1 file changed, 38 insertions(+), 14 deletions(-) diff --git a/docs/reference/query-dsl/boosting-query.asciidoc b/docs/reference/query-dsl/boosting-query.asciidoc index 5cd12ce1f00..c57235e7160 100644 --- a/docs/reference/query-dsl/boosting-query.asciidoc +++ b/docs/reference/query-dsl/boosting-query.asciidoc @@ -1,36 +1,60 @@ [[query-dsl-boosting-query]] === Boosting Query -The `boosting` query can be used to effectively demote results that -match a given query. Unlike the "NOT" clause in bool query, this still -selects documents that contain undesirable terms, but reduces their -overall score. +Returns documents matching a `positive` query while reducing the +<> of documents that also match a +`negative` query. -It accepts a `positive` query and a `negative` query. -Only documents that match the `positive` query will be included -in the results list, but documents that also match the `negative` query -will be downgraded by multiplying the original `_score` of the document -with the `negative_boost`. +You can use the `boosting` query to demote certain documents without +excluding them from the search results. 
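To make the demotion concrete, here is a back-of-the-envelope sketch, using made-up scores, of the rule spelled out under the parameters below: a document matching both clauses keeps its `positive` score multiplied by `negative_boost`, while a document matching only the `positive` query keeps its score unchanged.

[source,java]
----
// Hypothetical relevance scores, for illustration only
float positiveScore = 1.2f;  // score the document gets from the positive query
float negativeBoost = 0.5f;  // the configured negative_boost

// Matches the positive query only: the score is unchanged
float positiveOnly = positiveScore;               // 1.2

// Matches both queries: demoted, but still in the results
float bothMatch = positiveScore * negativeBoost;  // 0.6
----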
+ +[[boosting-query-ex-request]] +==== Example request [source,js] --------------------------------------------------- +---- GET /_search { "query": { "boosting" : { "positive" : { "term" : { - "field1" : "value1" + "text" : "apple" } }, "negative" : { "term" : { - "field2" : "value2" + "text" : "pie tart fruit crumble tree" } }, - "negative_boost" : 0.2 + "negative_boost" : 0.5 } } } --------------------------------------------------- +---- // CONSOLE + +[[boosting-top-level-params]] +==== Top-level parameters for `boosting` + +`positive` (Required):: +Query you wish to run. Any returned documents must match this query. + +`negative` (Required):: ++ +-- +Query used to decrease the <> of matching +documents. + +If a returned document matches the `positive` query and this query, the +`boosting` query calculates the final <> +for the document as follows: + +. Take the original relevance score from the `positive` query. +. Multiply the score by the `negative_boost` value. +-- + +`negative_boost` (Required):: +Floating point number between `0` and `1.0` used to decrease the +<> of documents matching the `negative` +query. \ No newline at end of file From 81dbcfb26815c7012b4d36dcdc20f843603ec11f Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Fri, 28 Jun 2019 13:58:06 +0100 Subject: [PATCH 27/42] Wildcard intervals (#43691) This commit adds a wildcard intervals source, similar to the prefix. It also changes the term parameter in prefix to read prefix, to bring it in to line with the pattern parameter in wildcard. Closes #43198 --- .../query-dsl/intervals-query.asciidoc | 28 ++++ .../test/search/230_interval_query.yml | 20 +++ .../index/query/IntervalsSourceProvider.java | 143 ++++++++++++++++-- .../elasticsearch/search/SearchModule.java | 2 + .../query/IntervalQueryBuilderTests.java | 80 +++++++++- 5 files changed, 258 insertions(+), 15 deletions(-) diff --git a/docs/reference/query-dsl/intervals-query.asciidoc b/docs/reference/query-dsl/intervals-query.asciidoc index 951147a21ac..7353ca137f3 100644 --- a/docs/reference/query-dsl/intervals-query.asciidoc +++ b/docs/reference/query-dsl/intervals-query.asciidoc @@ -101,6 +101,34 @@ If specified, then match intervals from this field rather than the top-level fie The `prefix` will be normalized using the search analyzer from this field, unless `analyzer` is specified separately. +[[intervals-wildcard]] +==== `wildcard` + +The `wildcard` rule finds terms that match a wildcard pattern. The pattern will +expand to match at most 128 terms; if there are more matching terms in the index, +then an error will be returned. + +[horizontal] +`pattern`:: +Find terms matching this pattern ++ +-- +This parameter supports two wildcard operators: + +* `?`, which matches any single character +* `*`, which can match zero or more characters, including an empty one + +WARNING: Avoid beginning patterns with `*` or `?`. This can increase +the iterations needed to find matching terms and slow search performance. +-- +`analyzer`:: +Which analyzer should be used to normalize the `pattern`. By default, the +search analyzer of the top-level field will be used. +`use_field`:: +If specified, then match intervals from this field rather than the top-level field. +The `pattern` will be normalized using the search analyzer from this field, unless +`analyzer` is specified separately. 
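As a rough sketch of what this rule builds internally, mirroring the expectations in `IntervalQueryBuilderTests` later in this patch: the pattern is normalized with the search analyzer and turned into a Lucene wildcard intervals source. The field name `text` below is only an example:

[source,java]
----
import org.apache.lucene.search.Query;
import org.apache.lucene.search.intervals.IntervalQuery;
import org.apache.lucene.search.intervals.Intervals;

// "Out?ide" would be lowercased by a typical search analyzer before matching,
// so a wildcard rule over an analyzed field roughly expands to this query
Query query = new IntervalQuery("text", Intervals.wildcard("out?ide"));
----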
+ [[intervals-all_of]] ==== `all_of` diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml index 460176c20c1..8f76daadfd5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml @@ -407,3 +407,23 @@ setup: prefix: out - match: { hits.total.value: 3 } +--- +"Test wildcard": + - skip: + version: " - 8.0.0" + reason: "TODO: change to 7.3 in backport" + - do: + search: + index: test + body: + query: + intervals: + text: + all_of: + intervals: + - match: + query: cold + - wildcard: + pattern: out?ide + - match: { hits.total.value: 3 } + diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java b/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java index 234018971ed..d4d28057c12 100644 --- a/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java +++ b/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java @@ -19,10 +19,12 @@ package org.elasticsearch.index.query; +import org.apache.lucene.index.IndexOptions; import org.apache.lucene.search.intervals.FilteredIntervalsSource; import org.apache.lucene.search.intervals.IntervalIterator; import org.apache.lucene.search.intervals.Intervals; import org.apache.lucene.search.intervals.IntervalsSource; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; @@ -80,6 +82,8 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont return Combine.fromXContent(parser); case "prefix": return Prefix.fromXContent(parser); + case "wildcard": + return Wildcard.fromXContent(parser); } throw new ParsingException(parser.getTokenLocation(), "Unknown interval type [" + parser.currentName() + "], expecting one of [match, any_of, all_of, prefix]"); @@ -446,18 +450,18 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont public static final String NAME = "prefix"; - private final String term; + private final String prefix; private final String analyzer; private final String useField; - public Prefix(String term, String analyzer, String useField) { - this.term = term; + public Prefix(String prefix, String analyzer, String useField) { + this.prefix = prefix; this.analyzer = analyzer; this.useField = useField; } public Prefix(StreamInput in) throws IOException { - this.term = in.readString(); + this.prefix = in.readString(); this.analyzer = in.readOptionalString(); this.useField = in.readOptionalString(); } @@ -472,10 +476,10 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont if (useField != null) { fieldType = context.fieldMapper(useField); assert fieldType != null; - source = Intervals.fixField(useField, fieldType.intervals(term, 0, false, analyzer, true)); + source = Intervals.fixField(useField, fieldType.intervals(prefix, 0, false, analyzer, true)); } else { - source = fieldType.intervals(term, 0, false, analyzer, true); + source = fieldType.intervals(prefix, 0, false, analyzer, true); } return source; } @@ -492,14 +496,14 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Prefix prefix = (Prefix) 
o; - return Objects.equals(term, prefix.term) && + return Objects.equals(this.prefix, prefix.prefix) && Objects.equals(analyzer, prefix.analyzer) && Objects.equals(useField, prefix.useField); } @Override public int hashCode() { - return Objects.hash(term, analyzer, useField); + return Objects.hash(prefix, analyzer, useField); } @Override @@ -509,7 +513,7 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(term); + out.writeString(prefix); out.writeOptionalString(analyzer); out.writeOptionalString(useField); } @@ -517,7 +521,7 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(NAME); - builder.field("term", term); + builder.field("prefix", prefix); if (analyzer != null) { builder.field("analyzer", analyzer); } @@ -535,7 +539,7 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont return new Prefix(term, analyzer, useField); }); static { - PARSER.declareString(constructorArg(), new ParseField("term")); + PARSER.declareString(constructorArg(), new ParseField("prefix")); PARSER.declareString(optionalConstructorArg(), new ParseField("analyzer")); PARSER.declareString(optionalConstructorArg(), new ParseField("use_field")); } @@ -545,6 +549,123 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont } } + public static class Wildcard extends IntervalsSourceProvider { + + public static final String NAME = "wildcard"; + + private final String pattern; + private final String analyzer; + private final String useField; + + public Wildcard(String pattern, String analyzer, String useField) { + this.pattern = pattern; + this.analyzer = analyzer; + this.useField = useField; + } + + public Wildcard(StreamInput in) throws IOException { + this.pattern = in.readString(); + this.analyzer = in.readOptionalString(); + this.useField = in.readOptionalString(); + } + + @Override + public IntervalsSource getSource(QueryShardContext context, MappedFieldType fieldType) { + NamedAnalyzer analyzer = fieldType.searchAnalyzer(); + if (this.analyzer != null) { + analyzer = context.getMapperService().getIndexAnalyzers().get(this.analyzer); + } + IntervalsSource source; + if (useField != null) { + fieldType = context.fieldMapper(useField); + assert fieldType != null; + checkPositions(fieldType); + if (this.analyzer == null) { + analyzer = fieldType.searchAnalyzer(); + } + BytesRef normalizedTerm = analyzer.normalize(useField, pattern); + // TODO Intervals.wildcard() should take BytesRef + source = Intervals.fixField(useField, Intervals.wildcard(normalizedTerm.utf8ToString())); + } + else { + checkPositions(fieldType); + BytesRef normalizedTerm = analyzer.normalize(fieldType.name(), pattern); + source = Intervals.wildcard(normalizedTerm.utf8ToString()); + } + return source; + } + + private void checkPositions(MappedFieldType type) { + if (type.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) { + throw new IllegalArgumentException("Cannot create intervals over field [" + type.name() + "] with no positions indexed"); + } + } + + @Override + public void extractFields(Set fields) { + if (useField != null) { + fields.add(useField); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; 
+ Wildcard wildcard = (Wildcard) o; + return Objects.equals(pattern, wildcard.pattern) && + Objects.equals(analyzer, wildcard.analyzer) && + Objects.equals(useField, wildcard.useField); + } + + @Override + public int hashCode() { + return Objects.hash(pattern, analyzer, useField); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(pattern); + out.writeOptionalString(analyzer); + out.writeOptionalString(useField); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(NAME); + builder.field("pattern", pattern); + if (analyzer != null) { + builder.field("analyzer", analyzer); + } + if (useField != null) { + builder.field("use_field", useField); + } + builder.endObject(); + return builder; + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, args -> { + String term = (String) args[0]; + String analyzer = (String) args[1]; + String useField = (String) args[2]; + return new Wildcard(term, analyzer, useField); + }); + static { + PARSER.declareString(constructorArg(), new ParseField("pattern")); + PARSER.declareString(optionalConstructorArg(), new ParseField("analyzer")); + PARSER.declareString(optionalConstructorArg(), new ParseField("use_field")); + } + + public static Wildcard fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + } + static class ScriptFilterSource extends FilteredIntervalsSource { final IntervalFilterScript script; diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index 1e0f0012114..236c9c12d57 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -849,6 +849,8 @@ public class SearchModule { IntervalsSourceProvider.Disjunction.NAME, IntervalsSourceProvider.Disjunction::new)); namedWriteables.add(new NamedWriteableRegistry.Entry(IntervalsSourceProvider.class, IntervalsSourceProvider.Prefix.NAME, IntervalsSourceProvider.Prefix::new)); + namedWriteables.add(new NamedWriteableRegistry.Entry(IntervalsSourceProvider.class, + IntervalsSourceProvider.Wildcard.NAME, IntervalsSourceProvider.Wildcard::new)); } private void registerQuery(QuerySpec spec) { diff --git a/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java index 7838f77cc16..c480b52c6dc 100644 --- a/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java @@ -393,32 +393,104 @@ public class IntervalQueryBuilderTests extends AbstractQueryTestCase { IntervalQueryBuilder builder1 = (IntervalQueryBuilder) parseQuery(no_positions_json); builder1.toQuery(createShardContext()); }); + String no_positions_fixed_field_json = "{ \"intervals\" : { \"" + STRING_FIELD_NAME + "\": { " + + "\"prefix\" : { \"prefix\" : \"term\", \"use_field\" : \"" + NO_POSITIONS_FIELD + "\" } } } }"; + expectThrows(IllegalArgumentException.class, () -> { + IntervalQueryBuilder builder1 = (IntervalQueryBuilder) parseQuery(no_positions_fixed_field_json); + builder1.toQuery(createShardContext()); + }); + String prefix_json = "{ \"intervals\" : { \"" + PREFIXED_FIELD + "\": { " +
- "\"prefix\" : { \"term\" : \"term\" } } } }"; + "\"prefix\" : { \"prefix\" : \"term\" } } } }"; builder = (IntervalQueryBuilder) parseQuery(prefix_json); expected = new IntervalQuery(PREFIXED_FIELD, Intervals.fixField(PREFIXED_FIELD + "._index_prefix", Intervals.term("term"))); assertEquals(expected, builder.toQuery(createShardContext())); String short_prefix_json = "{ \"intervals\" : { \"" + PREFIXED_FIELD + "\": { " + - "\"prefix\" : { \"term\" : \"t\" } } } }"; + "\"prefix\" : { \"prefix\" : \"t\" } } } }"; builder = (IntervalQueryBuilder) parseQuery(short_prefix_json); expected = new IntervalQuery(PREFIXED_FIELD, Intervals.or( Intervals.fixField(PREFIXED_FIELD + "._index_prefix", Intervals.wildcard("t?")), Intervals.term("t"))); assertEquals(expected, builder.toQuery(createShardContext())); + String fix_field_prefix_json = "{ \"intervals\" : { \"" + STRING_FIELD_NAME + "\": { " + + "\"prefix\" : { \"prefix\" : \"term\", \"use_field\" : \"" + PREFIXED_FIELD + "\" } } } }"; + builder = (IntervalQueryBuilder) parseQuery(fix_field_prefix_json); + // This looks weird, but it's fine, because the innermost fixField wins + expected = new IntervalQuery(STRING_FIELD_NAME, + Intervals.fixField(PREFIXED_FIELD, Intervals.fixField(PREFIXED_FIELD + "._index_prefix", Intervals.term("term")))); + assertEquals(expected, builder.toQuery(createShardContext())); + + String keyword_json = "{ \"intervals\" : { \"" + PREFIXED_FIELD + "\": { " + + "\"prefix\" : { \"prefix\" : \"Term\", \"analyzer\" : \"keyword\" } } } }"; + builder = (IntervalQueryBuilder) parseQuery(keyword_json); + expected = new IntervalQuery(PREFIXED_FIELD, Intervals.fixField(PREFIXED_FIELD + "._index_prefix", Intervals.term("Term"))); + assertEquals(expected, builder.toQuery(createShardContext())); + + String keyword_fix_field_json = "{ \"intervals\" : { \"" + STRING_FIELD_NAME + "\": { " + + "\"prefix\" : { \"prefix\" : \"Term\", \"analyzer\" : \"keyword\", \"use_field\" : \"" + PREFIXED_FIELD + "\" } } } }"; + builder = (IntervalQueryBuilder) parseQuery(keyword_fix_field_json); + expected = new IntervalQuery(STRING_FIELD_NAME, + Intervals.fixField(PREFIXED_FIELD, Intervals.fixField(PREFIXED_FIELD + "._index_prefix", Intervals.term("Term")))); + assertEquals(expected, builder.toQuery(createShardContext())); + } + + public void testWildcard() throws IOException { + + String json = "{ \"intervals\" : { \"" + STRING_FIELD_NAME + "\": { " + + "\"wildcard\" : { \"pattern\" : \"Te?m\" } } } }"; + + IntervalQueryBuilder builder = (IntervalQueryBuilder) parseQuery(json); + Query expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.wildcard("te?m")); + assertEquals(expected, builder.toQuery(createShardContext())); + + String no_positions_json = "{ \"intervals\" : { \"" + NO_POSITIONS_FIELD + "\": { " + + "\"wildcard\" : { \"pattern\" : \"term\" } } } }"; + expectThrows(IllegalArgumentException.class, () -> { + IntervalQueryBuilder builder1 = (IntervalQueryBuilder) parseQuery(no_positions_json); + builder1.toQuery(createShardContext()); + }); + + String keyword_json = "{ \"intervals\" : { \"" + STRING_FIELD_NAME + "\": { " + + "\"wildcard\" : { \"pattern\" : \"Te?m\", \"analyzer\" : \"keyword\" } } } }"; + + builder = (IntervalQueryBuilder) parseQuery(keyword_json); + expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.wildcard("Te?m")); + assertEquals(expected, builder.toQuery(createShardContext())); + + String fixed_field_json = "{ \"intervals\" : { \"" + STRING_FIELD_NAME + "\": { " + + "\"wildcard\" : { \"pattern\" : \"Te?m\", 
\"use_field\" : \"masked_field\" } } } }"; + + builder = (IntervalQueryBuilder) parseQuery(fixed_field_json); + expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.fixField(MASKED_FIELD, Intervals.wildcard("te?m"))); + assertEquals(expected, builder.toQuery(createShardContext())); + + String fixed_field_json_no_positions = "{ \"intervals\" : { \"" + STRING_FIELD_NAME + "\": { " + + "\"wildcard\" : { \"pattern\" : \"Te?m\", \"use_field\" : \"" + NO_POSITIONS_FIELD + "\" } } } }"; + expectThrows(IllegalArgumentException.class, () -> { + IntervalQueryBuilder builder1 = (IntervalQueryBuilder) parseQuery(fixed_field_json_no_positions); + builder1.toQuery(createShardContext()); + }); + + String fixed_field_analyzer_json = "{ \"intervals\" : { \"" + STRING_FIELD_NAME + "\": { " + + "\"wildcard\" : { \"pattern\" : \"Te?m\", \"use_field\" : \"masked_field\", \"analyzer\" : \"keyword\" } } } }"; + + builder = (IntervalQueryBuilder) parseQuery(fixed_field_analyzer_json); + expected = new IntervalQuery(STRING_FIELD_NAME, Intervals.fixField(MASKED_FIELD, Intervals.wildcard("Te?m"))); + assertEquals(expected, builder.toQuery(createShardContext())); } } From d1a4d8866d55ada6ef64d619a10d66eb6636769f Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Fri, 28 Jun 2019 16:33:24 +0300 Subject: [PATCH 28/42] Add missing dependencies so we can build in parallel (#43672) --- distribution/build.gradle | 3 ++- x-pack/plugin/sql/sql-cli/build.gradle | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/distribution/build.gradle b/distribution/build.gradle index 5f269f700b8..13a4b25ab78 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -117,7 +117,7 @@ task buildTransportModules { void copyModule(Sync copyTask, Project module) { copyTask.configure { - dependsOn { module.bundlePlugin } + dependsOn "${module.path}:bundlePlugin" from({ zipTree(module.bundlePlugin.outputs.files.singleFile) }) { includeEmptyDirs false @@ -167,6 +167,7 @@ buildDefaultLog4jConfig.doLast(writeLog4jProperties) // copy log4j2.properties from modules that have it void copyLog4jProperties(Task buildTask, Project module) { + buildTask.dependsOn "${module.path}:bundlePlugin" buildTask.doFirst { FileTree tree = zipTree(module.bundlePlugin.outputs.files.singleFile) FileTree filtered = tree.matching { diff --git a/x-pack/plugin/sql/sql-cli/build.gradle b/x-pack/plugin/sql/sql-cli/build.gradle index 927d165c2d2..bbd87c055d7 100644 --- a/x-pack/plugin/sql/sql-cli/build.gradle +++ b/x-pack/plugin/sql/sql-cli/build.gradle @@ -51,9 +51,9 @@ dependencyLicenses { * can be easily shipped around and used. */ jar { + dependsOn configurations.runtimeClasspath from({ - configurations.compile.collect { it.isDirectory() ? it : zipTree(it) } - configurations.runtime.collect { it.isDirectory() ? it : zipTree(it) } + configurations.runtimeClasspath.collect { it.isDirectory() ? it : zipTree(it) } }) { // We don't need the META-INF from the things we bundle. For now. 
exclude 'META-INF/*' From 23f739b513710878b1e95e1693d4b0a1d565aa06 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Fri, 28 Jun 2019 16:38:17 +0300 Subject: [PATCH 29/42] Testclusters: Convert additional projects (#43625) * Testclusters: Convert additional projects Found some more that were not using testclusters from elasticsearch-ci/1 * Allow IOException too * Make the client more resilient --- qa/ccs-unavailable-clusters/build.gradle | 1 + qa/die-with-dignity/build.gradle | 10 +++- .../elasticsearch/DieWithDignityPlugin.java | 4 ++ .../qa/die_with_dignity/DieWithDignityIT.java | 55 +++++++------------ qa/evil-tests/build.gradle | 1 + qa/multi-cluster-search/build.gradle | 39 ++++++------- qa/smoke-test-http/build.gradle | 3 +- .../build.gradle | 1 + 8 files changed, 56 insertions(+), 58 deletions(-) diff --git a/qa/ccs-unavailable-clusters/build.gradle b/qa/ccs-unavailable-clusters/build.gradle index ea80ee983b8..749623b26c3 100644 --- a/qa/ccs-unavailable-clusters/build.gradle +++ b/qa/ccs-unavailable-clusters/build.gradle @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' apply plugin: 'elasticsearch.test-with-dependencies' diff --git a/qa/die-with-dignity/build.gradle b/qa/die-with-dignity/build.gradle index 140df6e283a..a40f6366e65 100644 --- a/qa/die-with-dignity/build.gradle +++ b/qa/die-with-dignity/build.gradle @@ -17,6 +17,7 @@ * under the License. */ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.esplugin' esplugin { @@ -24,14 +25,17 @@ esplugin { classname 'org.elasticsearch.DieWithDignityPlugin' } -integTestRunner { +integTest.runner { systemProperty 'tests.security.manager', 'false' systemProperty 'tests.system_call_filter', 'false' - nonInputProperties.systemProperty 'pidfile', "${-> integTest.getNodes().get(0).pidFile}" - nonInputProperties.systemProperty 'log', "${-> integTest.getNodes().get(0).homeDir}/logs/${-> integTest.getNodes().get(0).clusterName}_server.json" + nonInputProperties.systemProperty 'log', "${-> testClusters.integTest.singleNode().getServerLog()}" systemProperty 'runtime.java.home', "${project.runtimeJavaHome}" } +testClusters.integTest { + systemProperty "die.with.dignity.test", "whatever" +} + test.enabled = false check.dependsOn integTest diff --git a/qa/die-with-dignity/src/main/java/org/elasticsearch/DieWithDignityPlugin.java b/qa/die-with-dignity/src/main/java/org/elasticsearch/DieWithDignityPlugin.java index ed1e3d3879a..8027eeb8948 100644 --- a/qa/die-with-dignity/src/main/java/org/elasticsearch/DieWithDignityPlugin.java +++ b/qa/die-with-dignity/src/main/java/org/elasticsearch/DieWithDignityPlugin.java @@ -36,6 +36,10 @@ import java.util.function.Supplier; public class DieWithDignityPlugin extends Plugin implements ActionPlugin { + public DieWithDignityPlugin() { + assert System.getProperty("die.with.dignity.test") != null : "test should pass the `die.with.dignity.test` property"; + } + @Override public List getRestHandlers( final Settings settings, diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index 737106ee920..20ceb90ca03 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ 
b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -19,12 +19,10 @@ package org.elasticsearch.qa.die_with_dignity; -import org.apache.http.ConnectionClosedException; -import org.apache.lucene.util.Constants; import org.elasticsearch.client.Request; import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.rest.ESRestTestCase; -import org.hamcrest.Matcher; import java.io.BufferedReader; import java.io.IOException; @@ -36,51 +34,28 @@ import java.util.Iterator; import java.util.List; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.hasToString; -import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; public class DieWithDignityIT extends ESRestTestCase { @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/43413") public void testDieWithDignity() throws Exception { - // deleting the PID file prevents stopping the cluster from failing since it occurs if and only if the PID file exists - final Path pidFile = PathUtils.get(System.getProperty("pidfile")); - final List pidFileLines = Files.readAllLines(pidFile); - assertThat(pidFileLines, hasSize(1)); - final int pid = Integer.parseInt(pidFileLines.get(0)); - Files.delete(pidFile); - IOException e = expectThrows(IOException.class, - () -> client().performRequest(new Request("GET", "/_die_with_dignity"))); - Matcher failureMatcher = instanceOf(ConnectionClosedException.class); - if (Constants.WINDOWS) { - /* - * If the other side closes the connection while we're waiting to fill our buffer - * we can get IOException with the message below. It seems to only come up on - * Windows and it *feels* like it could be a ConnectionClosedException but - * upstream does not consider this a bug: - * https://issues.apache.org/jira/browse/HTTPASYNC-134 - * - * So we catch it here and consider it "ok". 
- */ - failureMatcher = either(failureMatcher) - .or(hasToString(containsString("An existing connection was forcibly closed by the remote host"))); - } - assertThat(e, failureMatcher); + expectThrows( + IOException.class, + () -> client().performRequest(new Request("GET", "/_die_with_dignity")) + ); // the Elasticsearch process should die and disappear from the output of jps assertBusy(() -> { final String jpsPath = PathUtils.get(System.getProperty("runtime.java.home"), "bin/jps").toString(); - final Process process = new ProcessBuilder().command(jpsPath).start(); + final Process process = new ProcessBuilder().command(jpsPath, "-v").start(); assertThat(process.waitFor(), equalTo(0)); + try (InputStream is = process.getInputStream(); BufferedReader in = new BufferedReader(new InputStreamReader(is, "UTF-8"))) { String line; while ((line = in.readLine()) != null) { - final int currentPid = Integer.parseInt(line.split("\\s+")[0]); - assertThat(line, pid, not(equalTo(currentPid))); + assertThat(line, line, not(containsString("-Ddie.with.dignity.test"))); } } }); @@ -95,9 +70,9 @@ public class DieWithDignityIT extends ESRestTestCase { try { while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) { final String line = it.next(); - if (line.matches(".*ERROR.*o\\.e\\.ExceptionsHelper.*node-0.*fatal error.*")) { + if (line.matches(".*ERROR.*o\\.e\\.ExceptionsHelper.*integTest-0.*fatal error.*")) { fatalError = true; - } else if (line.matches(".*ERROR.*o\\.e\\.b\\.ElasticsearchUncaughtExceptionHandler.*node-0.*" + } else if (line.matches(".*ERROR.*o\\.e\\.b\\.ElasticsearchUncaughtExceptionHandler.*integTest-0.*" + "fatal error in thread \\[Thread-\\d+\\], exiting.*")) { fatalErrorInThreadExiting = true; assertTrue(it.hasNext()); @@ -127,4 +102,14 @@ public class DieWithDignityIT extends ESRestTestCase { return true; } + @Override + protected final Settings restClientSettings() { + return Settings.builder().put(super.restClientSettings()) + // use a short socket timeout so the request fails fast once the node has died, + // rather than hanging until the default client timeout expires + .put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "1s") + .build(); + } + } diff --git a/qa/evil-tests/build.gradle b/qa/evil-tests/build.gradle index 2f9239e5c2f..38c1b3e1a9a 100644 --- a/qa/evil-tests/build.gradle +++ b/qa/evil-tests/build.gradle @@ -23,6 +23,7 @@ * threads, etc.
*/ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.standalone-test' dependencies { diff --git a/qa/multi-cluster-search/build.gradle b/qa/multi-cluster-search/build.gradle index 7f923d03f71..1913c86fc9c 100644 --- a/qa/multi-cluster-search/build.gradle +++ b/qa/multi-cluster-search/build.gradle @@ -19,42 +19,43 @@ import org.elasticsearch.gradle.test.RestIntegTestTask +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.standalone-test' dependencies { testCompile project(":client:rest-high-level") } -task remoteClusterTest(type: RestIntegTestTask) { +task 'remote-cluster'(type: RestIntegTestTask) { mustRunAfter(precommit) + runner { + systemProperty 'tests.rest.suite', 'remote_cluster' + } } -remoteClusterTestCluster { - numNodes = 2 - clusterName = 'remote-cluster' - setting 'cluster.remote.connect', false +testClusters.'remote-cluster' { + numberOfNodes = 2 + setting 'cluster.remote.connect', 'false' } -remoteClusterTestRunner { - systemProperty 'tests.rest.suite', 'remote_cluster' +task mixedClusterTest(type: RestIntegTestTask) { + useCluster testClusters.'remote-cluster' + runner { + dependsOn 'remote-cluster' + systemProperty 'tests.rest.suite', 'multi_cluster' + } } -task mixedClusterTest(type: RestIntegTestTask) {} - -mixedClusterTestCluster { - dependsOn remoteClusterTestRunner - setting 'cluster.remote.my_remote_cluster.seeds', "\"${-> remoteClusterTest.nodes.get(0).transportUri()}\"" - setting 'cluster.remote.connections_per_cluster', 1 - setting 'cluster.remote.connect', true +testClusters.mixedClusterTest { + setting 'cluster.remote.my_remote_cluster.seeds', + { "\"${testClusters.'remote-cluster'.getAllTransportPortURI().get(0)}\"" } + setting 'cluster.remote.connections_per_cluster', '1' + setting 'cluster.remote.connect', 'true' } -mixedClusterTestRunner { - systemProperty 'tests.rest.suite', 'multi_cluster' - finalizedBy 'remoteClusterTestCluster#node0.stop','remoteClusterTestCluster#node1.stop' -} task integTest { - dependsOn = [mixedClusterTest] + dependsOn mixedClusterTest } test.enabled = false // no unit tests for multi-cluster-search, only integration tests diff --git a/qa/smoke-test-http/build.gradle b/qa/smoke-test-http/build.gradle index ef1a97fc7ab..90fc9e39501 100644 --- a/qa/smoke-test-http/build.gradle +++ b/qa/smoke-test-http/build.gradle @@ -17,6 +17,7 @@ * under the License. */ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' apply plugin: 'elasticsearch.test-with-dependencies' @@ -26,7 +27,7 @@ dependencies { testCompile project(path: ':plugins:transport-nio', configuration: 'runtime') // for http } -integTestRunner { +integTest.runner { /* * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each * other if we allow them to set the number of available processors as it's set-once in Netty. diff --git a/qa/smoke-test-ingest-with-all-dependencies/build.gradle b/qa/smoke-test-ingest-with-all-dependencies/build.gradle index 9267f90cd7e..9f5c40ac937 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/build.gradle +++ b/qa/smoke-test-ingest-with-all-dependencies/build.gradle @@ -17,6 +17,7 @@ * under the License. 
*/ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' From 377c4cfdc043a7f54eafe744501c92d01ab045c6 Mon Sep 17 00:00:00 2001 From: weizijun Date: Fri, 28 Jun 2019 21:47:24 +0800 Subject: [PATCH 30/42] Fix threshold spelling errors (#43326) Replaces the misspelling `treshold` with `threshold`. --- .../decider/DiskThresholdDecider.java | 6 ++--- .../allocation/BalanceConfigurationTests.java | 22 +++++++++---------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 1f048fca76c..0838999c4f3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -131,12 +131,12 @@ public class DiskThresholdDecider extends AllocationDecider { // flag that determines whether the low threshold checks below can be skipped. We use this for a primary shard that is freshly // allocated and empty. - boolean skipLowTresholdChecks = shardRouting.primary() && + boolean skipLowThresholdChecks = shardRouting.primary() && shardRouting.active() == false && shardRouting.recoverySource().getType() == RecoverySource.Type.EMPTY_STORE; // checks for exact byte comparisons if (freeBytes < diskThresholdSettings.getFreeBytesThresholdLow().getBytes()) { - if (skipLowTresholdChecks == false) { + if (skipLowThresholdChecks == false) { if (logger.isDebugEnabled()) { logger.debug("less than the required {} free bytes threshold ({} free) on node {}, preventing allocation", diskThresholdSettings.getFreeBytesThresholdLow(), freeBytesValue, node.nodeId()); @@ -178,7 +178,7 @@ public class DiskThresholdDecider extends AllocationDecider { // checks for percentage comparisons if (freeDiskPercentage < diskThresholdSettings.getFreeDiskThresholdLow()) { // If the shard is a replica or is a non-empty primary, check the low threshold - if (skipLowTresholdChecks == false) { + if (skipLowThresholdChecks == false) { if (logger.isDebugEnabled()) { logger.debug("more than the allowed {} used disk threshold ({} used) on node [{}], preventing allocation", Strings.format1Decimals(usedDiskThresholdLow, "%"), diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java index 10fa01c2a2b..ba18bc0f6d4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -64,28 +64,28 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { /* Tests balance over indices only */ final float indexBalance = 1.0f; final float replicaBalance = 0.0f; - final float balanceTreshold = 1.0f; + final float balanceThreshold = 1.0f; Settings.Builder settings = Settings.builder(); settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance); settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(),
replicaBalance); - settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceTreshold); + settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceThreshold); AllocationService strategy = createAllocationService(settings.build(), new TestGatewayAllocator()); ClusterState clusterState = initCluster(strategy); assertIndexBalance(clusterState.getRoutingTable(), clusterState.getRoutingNodes(), numberOfNodes, numberOfIndices, - numberOfReplicas, numberOfShards, balanceTreshold); + numberOfReplicas, numberOfShards, balanceThreshold); clusterState = addNode(clusterState, strategy); assertIndexBalance(clusterState.getRoutingTable(), clusterState.getRoutingNodes(), numberOfNodes + 1, - numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold); + numberOfIndices, numberOfReplicas, numberOfShards, balanceThreshold); clusterState = removeNodes(clusterState, strategy); assertIndexBalance(clusterState.getRoutingTable(), clusterState.getRoutingNodes(), - (numberOfNodes + 1) - (numberOfNodes + 1) / 2, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold); + (numberOfNodes + 1) - (numberOfNodes + 1) / 2, numberOfIndices, numberOfReplicas, numberOfShards, balanceThreshold); } public void testReplicaBalance() { @@ -201,7 +201,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { private void assertReplicaBalance(RoutingNodes nodes, int numberOfNodes, int numberOfIndices, int numberOfReplicas, - int numberOfShards, float treshold) { + int numberOfShards, float threshold) { final int unassigned = nodes.unassigned().size(); if (unassigned > 0) { @@ -220,8 +220,8 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { final int numShards = numberOfIndices * numberOfShards * (numberOfReplicas + 1) - unassigned; final float avgNumShards = (float) (numShards) / (float) (numberOfNodes); - final int minAvgNumberOfShards = Math.round(Math.round(Math.floor(avgNumShards - treshold))); - final int maxAvgNumberOfShards = Math.round(Math.round(Math.ceil(avgNumShards + treshold))); + final int minAvgNumberOfShards = Math.round(Math.round(Math.floor(avgNumShards - threshold))); + final int maxAvgNumberOfShards = Math.round(Math.round(Math.ceil(avgNumShards + threshold))); for (RoutingNode node : nodes) { assertThat(node.shardsWithState(STARTED).size(), Matchers.greaterThanOrEqualTo(minAvgNumberOfShards)); @@ -230,12 +230,12 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { } private void assertIndexBalance(RoutingTable routingTable, RoutingNodes nodes, int numberOfNodes, int numberOfIndices, - int numberOfReplicas, int numberOfShards, float treshold) { + int numberOfReplicas, int numberOfShards, float threshold) { final int numShards = numberOfShards * (numberOfReplicas + 1); final float avgNumShards = (float) (numShards) / (float) (numberOfNodes); - final int minAvgNumberOfShards = Math.round(Math.round(Math.floor(avgNumShards - treshold))); - final int maxAvgNumberOfShards = Math.round(Math.round(Math.ceil(avgNumShards + treshold))); + final int minAvgNumberOfShards = Math.round(Math.round(Math.floor(avgNumShards - threshold))); + final int maxAvgNumberOfShards = Math.round(Math.round(Math.ceil(avgNumShards + threshold))); for (ObjectCursor index : routingTable.indicesRouting().keys()) { for (RoutingNode node : nodes) { From 7ca69db83fd9a3677307ceac098f78340ed40825 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Fri, 28 Jun 2019 15:42:43 +0200 Subject: [PATCH 31/42] Refactor IndexSearcherWrapper to disallow 
the wrapping of IndexSearcher (#43645) This change removes the ability to wrap an IndexSearcher in plugins. The IndexSearcherWrapper is replaced by an IndexReaderWrapper and allows wrapping only the DirectoryReader. This simplifies the creation of the context IndexSearcher that is used on a per-request basis. This change also moves the optimization that was implemented in the security index searcher wrapper to the ContextIndexSearcher that now checks the live docs to determine how the search should be executed. If the underlying live docs are a sparse bit set, the searcher computes the intersection between the query and the live docs instead of checking the live docs for every document that matches the query. --- .../apache/lucene/search/XIndexSearcher.java | 46 -- .../apache/lucene/util/CombinedBitSet.java | 117 ++++ .../org/elasticsearch/index/IndexModule.java | 49 +- .../org/elasticsearch/index/IndexService.java | 16 +- .../index/shard/IndexSearcherWrapper.java | 138 ----- .../elasticsearch/index/shard/IndexShard.java | 79 ++- .../search/DefaultSearchContext.java | 3 +- .../search/internal/ContextIndexSearcher.java | 122 +++- .../lucene/util/CombinedBitSetTests.java | 111 ++++ .../elasticsearch/index/IndexModuleTests.java | 20 +- .../index/engine/InternalEngineTests.java | 31 - ...ests.java => IndexReaderWrapperTests.java} | 91 +-- .../index/shard/IndexShardIT.java | 12 +- .../index/shard/IndexShardTests.java | 43 +- .../IndexingMemoryControllerTests.java | 5 +- .../internal/ContextIndexSearcherTests.java | 425 +++++++++++++ .../profile/query/QueryProfilerTests.java | 9 +- .../index/shard/IndexShardTestCase.java | 36 +- .../aggregations/AggregatorTestCase.java | 16 +- .../test/engine/MockEngineSupport.java | 19 +- .../index/engine/FrozenEngine.java | 8 +- .../accesscontrol/DocumentSubsetReader.java | 23 +- .../SecurityIndexReaderWrapper.java | 111 ++++ .../SecurityIndexSearcherWrapper.java | 217 ------- .../SourceOnlySnapshotShardTests.java | 4 +- ...tyIndexReaderWrapperIntegrationTests.java} | 17 +- .../SecurityIndexReaderWrapperUnitTests.java | 225 +++++++ ...SecurityIndexSearcherWrapperUnitTests.java | 561 ------------------ .../xpack/security/Security.java | 6 +- 29 files changed, 1275 insertions(+), 1285 deletions(-) delete mode 100644 server/src/main/java/org/apache/lucene/search/XIndexSearcher.java create mode 100644 server/src/main/java/org/apache/lucene/util/CombinedBitSet.java delete mode 100644 server/src/main/java/org/elasticsearch/index/shard/IndexSearcherWrapper.java create mode 100644 server/src/test/java/org/apache/lucene/util/CombinedBitSetTests.java rename server/src/test/java/org/elasticsearch/index/shard/{IndexSearcherWrapperTests.java => IndexReaderWrapperTests.java} (65%) create mode 100644 server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapper.java delete mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java rename x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/{SecurityIndexSearcherWrapperIntegrationTests.java => SecurityIndexReaderWrapperIntegrationTests.java} (93%) create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperUnitTests.java delete mode 100644
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java diff --git a/server/src/main/java/org/apache/lucene/search/XIndexSearcher.java b/server/src/main/java/org/apache/lucene/search/XIndexSearcher.java deleted file mode 100644 index 100c5f4944a..00000000000 --- a/server/src/main/java/org/apache/lucene/search/XIndexSearcher.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.lucene.search; - -import org.apache.lucene.index.LeafReaderContext; - -import java.io.IOException; -import java.util.List; - -/** - * A wrapper for {@link IndexSearcher} that makes {@link IndexSearcher#search(List, Weight, Collector)} - * visible by sub-classes. - */ -public class XIndexSearcher extends IndexSearcher { - private final IndexSearcher in; - - public XIndexSearcher(IndexSearcher in) { - super(in.getIndexReader()); - this.in = in; - setSimilarity(in.getSimilarity()); - setQueryCache(in.getQueryCache()); - setQueryCachingPolicy(in.getQueryCachingPolicy()); - } - - @Override - public void search(List leaves, Weight weight, Collector collector) throws IOException { - in.search(leaves, weight, collector); - } -} diff --git a/server/src/main/java/org/apache/lucene/util/CombinedBitSet.java b/server/src/main/java/org/apache/lucene/util/CombinedBitSet.java new file mode 100644 index 00000000000..cb1bd819ab2 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/util/CombinedBitSet.java @@ -0,0 +1,117 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.lucene.util; + +import org.apache.lucene.search.DocIdSetIterator; + +/** + * A {@link BitSet} implementation that combines two instances of {@link BitSet} and {@link Bits} + * to provide a single merged view. 
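Before the implementation below, a quick illustration of the merged view just described: a doc id is visible through the combined set only when both underlying sets have it set. This is a minimal sketch using plain java.util.BitSet, an illustrative assumption for brevity; the patch itself works against Lucene's BitSet and Bits interfaces:

import java.util.BitSet;

public class CombinedViewSketch {
    public static void main(String[] args) {
        BitSet roleBits = new BitSet(8);   // stands in for the "first" bit set
        roleBits.set(1);
        roleBits.set(3);
        roleBits.set(5);

        BitSet liveDocs = new BitSet(8);   // stands in for the "second" Bits view
        liveDocs.set(3);
        liveDocs.set(5);
        liveDocs.set(7);

        // Same semantics as the combined view: visible only if set in both.
        BitSet combined = (BitSet) roleBits.clone();
        combined.and(liveDocs);
        System.out.println(combined);      // prints {3, 5}
    }
}

Unlike this sketch, the CombinedBitSet below never materializes the intersection; get(int) evaluates first.get(i) && second.get(i) lazily, so layering a role filter over live docs stays cheap.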
+ */ +public final class CombinedBitSet extends BitSet implements Bits { + private final BitSet first; + private final Bits second; + private final int length; + + public CombinedBitSet(BitSet first, Bits second) { + this.first = first; + this.second = second; + this.length = first.length(); + } + + public BitSet getFirst() { + return first; + } + + /** + * This implementation is slow and requires to iterate over all bits to compute + * the intersection. Use {@link #approximateCardinality()} for + * a fast approximation. + */ + @Override + public int cardinality() { + int card = 0; + for (int i = 0; i < length; i++) { + card += get(i) ? 1 : 0; + } + return card; + } + + @Override + public int approximateCardinality() { + return first.cardinality(); + } + + @Override + public int prevSetBit(int index) { + assert index >= 0 && index < length : "index=" + index + ", numBits=" + length(); + int prev = first.prevSetBit(index); + while (prev != -1 && second.get(prev) == false) { + if (prev == 0) { + return -1; + } + prev = first.prevSetBit(prev-1); + } + return prev; + } + + @Override + public int nextSetBit(int index) { + assert index >= 0 && index < length : "index=" + index + " numBits=" + length(); + int next = first.nextSetBit(index); + while (next != DocIdSetIterator.NO_MORE_DOCS && second.get(next) == false) { + if (next == length() - 1) { + return DocIdSetIterator.NO_MORE_DOCS; + } + next = first.nextSetBit(next+1); + } + return next; + } + + @Override + public long ramBytesUsed() { + return first.ramBytesUsed(); + } + + @Override + public boolean get(int index) { + return first.get(index) && second.get(index); + } + + @Override + public int length() { + return length; + } + + @Override + public void set(int i) { + throw new UnsupportedOperationException("not implemented"); + } + + @Override + public void clear(int i) { + throw new UnsupportedOperationException("not implemented"); + } + + @Override + public void clear(int startIndex, int endIndex) { + throw new UnsupportedOperationException("not implemented"); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index ca0f34803cc..6ef335144eb 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -19,6 +19,10 @@ package org.elasticsearch.index; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.FilterDirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReader; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.store.MMapDirectory; @@ -26,6 +30,7 @@ import org.apache.lucene.util.Constants; import org.apache.lucene.util.SetOnce; import org.elasticsearch.Version; import org.elasticsearch.client.Client; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Setting; @@ -38,10 +43,10 @@ import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.cache.query.DisabledQueryCache; import org.elasticsearch.index.cache.query.IndexQueryCache; import org.elasticsearch.index.cache.query.QueryCache; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineFactory; import 
org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.IndexEventListener; -import org.elasticsearch.index.shard.IndexSearcherWrapper; import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.index.similarity.SimilarityService; @@ -112,7 +117,8 @@ public final class IndexModule { private final IndexSettings indexSettings; private final AnalysisRegistry analysisRegistry; private final EngineFactory engineFactory; - private SetOnce indexSearcherWrapper = new SetOnce<>(); + private SetOnce>> indexReaderWrapper = new SetOnce<>(); private final Set indexEventListeners = new HashSet<>(); private final Map> similarities = new HashMap<>(); private final Map directoryFactories; @@ -277,13 +283,26 @@ public final class IndexModule { } /** - * Sets a {@link org.elasticsearch.index.IndexModule.IndexSearcherWrapperFactory} that is called once the IndexService - * is fully constructed. - * Note: this method can only be called once per index. Multiple wrappers are not supported. + * Sets the factory for creating new {@link DirectoryReader} wrapper instances. + * The factory ({@link Function}) is called once the IndexService is fully constructed. + * NOTE: this method can only be called once per index. Multiple wrappers are not supported. + *
+     * The {@link CheckedFunction} is invoked each time a {@link Engine.Searcher} is requested to do an operation,
+     * for example search, and must return a new directory reader wrapping the provided directory reader, or the
+     * provided directory reader itself if no wrapping was performed.
+     * The wrapped reader can filter out documents (just as deleted documents are filtered out) but must not
+     * change any term or document content.
+     * NOTE: The index reader wrapper ({@link CheckedFunction}) has a per-request lifecycle: it must delegate
+     * {@link IndexReader#getReaderCacheHelper()} and {@link LeafReader#getCoreCacheHelper()}, and it must be
+     * an instance of {@link FilterDirectoryReader} that eventually exposes the original reader
+     * via {@link FilterDirectoryReader#getDelegate()}.
+     * The returned reader is closed once it goes out of scope.
+     *
*/ - public void setSearcherWrapper(IndexSearcherWrapperFactory indexSearcherWrapperFactory) { + public void setReaderWrapper(Function> indexReaderWrapperFactory) { ensureNotFrozen(); - this.indexSearcherWrapper.set(indexSearcherWrapperFactory); + this.indexReaderWrapper.set(indexReaderWrapperFactory); } IndexEventListener freeze() { // pkg private for testing @@ -348,16 +367,6 @@ public final class IndexModule { } - /** - * Factory for creating new {@link IndexSearcherWrapper} instances - */ - public interface IndexSearcherWrapperFactory { - /** - * Returns a new IndexSearcherWrapper. This method is called once per index per node - */ - IndexSearcherWrapper newWrapper(IndexService indexService); - } - public static Type defaultStoreType(final boolean allowMmap) { if (allowMmap && Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) { return Type.HYBRIDFS; @@ -384,8 +393,8 @@ public final class IndexModule { NamedWriteableRegistry namedWriteableRegistry) throws IOException { final IndexEventListener eventListener = freeze(); - IndexSearcherWrapperFactory searcherWrapperFactory = indexSearcherWrapper.get() == null - ? (shard) -> null : indexSearcherWrapper.get(); + Function> readerWrapperFactory = + indexReaderWrapper.get() == null ? (shard) -> null : indexReaderWrapper.get(); eventListener.beforeIndexCreated(indexSettings.getIndex(), indexSettings.getSettings()); final IndexStorePlugin.DirectoryFactory directoryFactory = getDirectoryFactory(indexSettings, directoryFactories); final QueryCache queryCache; @@ -402,7 +411,7 @@ public final class IndexModule { return new IndexService(indexSettings, indexCreationContext, environment, xContentRegistry, new SimilarityService(indexSettings, scriptService, similarities), shardStoreDeleter, analysisRegistry, engineFactory, circuitBreakerService, bigArrays, threadPool, scriptService, - client, queryCache, directoryFactory, eventListener, searcherWrapperFactory, mapperRegistry, + client, queryCache, directoryFactory, eventListener, readerWrapperFactory, mapperRegistry, indicesFieldDataCache, searchOperationListeners, indexOperationListeners, namedWriteableRegistry); } diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 6c51a7a6f5c..d09dcd02b89 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -20,6 +20,7 @@ package org.elasticsearch.index; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.search.IndexSearcher; @@ -32,6 +33,7 @@ import org.elasticsearch.Version; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Setting; @@ -59,7 +61,6 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.seqno.RetentionLeaseSyncer; import org.elasticsearch.index.shard.IndexEventListener; -import org.elasticsearch.index.shard.IndexSearcherWrapper; import org.elasticsearch.index.shard.IndexShard; import 
org.elasticsearch.index.shard.IndexShardClosedException; import org.elasticsearch.index.shard.IndexingOperationListener; @@ -91,6 +92,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; +import java.util.function.Function; import java.util.function.LongSupplier; import java.util.function.Supplier; @@ -106,7 +108,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final NodeEnvironment nodeEnv; private final ShardStoreDeleter shardStoreDeleter; private final IndexStorePlugin.DirectoryFactory directoryFactory; - private final IndexSearcherWrapper searcherWrapper; + private final CheckedFunction readerWrapper; private final IndexCache indexCache; private final MapperService mapperService; private final NamedXContentRegistry xContentRegistry; @@ -153,7 +155,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust QueryCache queryCache, IndexStorePlugin.DirectoryFactory directoryFactory, IndexEventListener eventListener, - IndexModule.IndexSearcherWrapperFactory wrapperFactory, + Function> wrapperFactory, MapperRegistry mapperRegistry, IndicesFieldDataCache indicesFieldDataCache, List searchOperationListeners, @@ -205,7 +207,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust this.directoryFactory = directoryFactory; this.engineFactory = Objects.requireNonNull(engineFactory); // initialize this last -- otherwise if the wrapper requires any other member to be non-null we fail with an NPE - this.searcherWrapper = wrapperFactory.newWrapper(this); + this.readerWrapper = wrapperFactory.apply(this); this.searchOperationListeners = Collections.unmodifiableList(searchOperationListeners); this.indexingOperationListeners = Collections.unmodifiableList(indexingOperationListeners); // kick off async ops for the first shard in this index @@ -418,7 +420,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust similarityService, engineFactory, eventListener, - searcherWrapper, + readerWrapper, threadPool, bigArrays, engineWarmer, @@ -756,8 +758,8 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust return engineFactory; } - final IndexSearcherWrapper getSearcherWrapper() { - return searcherWrapper; + final CheckedFunction getReaderWrapper() { + return readerWrapper; } // pkg private for testing final IndexStorePlugin.DirectoryFactory getDirectoryFactory() { diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexSearcherWrapper.java b/server/src/main/java/org/elasticsearch/index/shard/IndexSearcherWrapper.java deleted file mode 100644 index 3a6df72a740..00000000000 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexSearcherWrapper.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.shard; - -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.FilterDirectoryReader; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.LeafReader; -import org.apache.lucene.search.IndexSearcher; -import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; -import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.index.engine.Engine; - -import java.io.IOException; - -/** - * Extension point to add custom functionality at request time to the {@link DirectoryReader} - * and {@link IndexSearcher} managed by the {@link IndexShard}. - */ -public class IndexSearcherWrapper { - - /** - * Wraps the given {@link DirectoryReader}. The wrapped reader can filter out document just like delete documents etc. but - * must not change any term or document content. - *
- * NOTE: The wrapper has a per-request lifecycle, must delegate {@link IndexReader#getReaderCacheHelper()}, - * {@link LeafReader#getCoreCacheHelper()} and must be an instance of {@link FilterDirectoryReader} that - * eventually exposes the original reader via {@link FilterDirectoryReader#getDelegate()}. - * The returned reader is closed once it goes out of scope. - *
- * @param reader The provided directory reader to be wrapped to add custom functionality - * @return a new directory reader wrapping the provided directory reader or if no wrapping was performed - * the provided directory reader - */ - protected DirectoryReader wrap(DirectoryReader reader) throws IOException { - return reader; - } - - /** - * @param searcher The provided index searcher to be wrapped to add custom functionality - * @return a new index searcher wrapping the provided index searcher or if no wrapping was performed - * the provided index searcher - */ - protected IndexSearcher wrap(IndexSearcher searcher) throws IOException { - return searcher; - } - /** - * If there are configured {@link IndexSearcherWrapper} instances, the {@link IndexSearcher} of the provided engine searcher - * gets wrapped and a new {@link Engine.Searcher} instances is returned, otherwise the provided {@link Engine.Searcher} is returned. - * - * This is invoked each time a {@link Engine.Searcher} is requested to do an operation. (for example search) - */ - public final Engine.Searcher wrap(Engine.Searcher engineSearcher) throws IOException { - final ElasticsearchDirectoryReader elasticsearchDirectoryReader = - ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(engineSearcher.getDirectoryReader()); - if (elasticsearchDirectoryReader == null) { - throw new IllegalStateException("Can't wrap non elasticsearch directory reader"); - } - NonClosingReaderWrapper nonClosingReaderWrapper = new NonClosingReaderWrapper(engineSearcher.getDirectoryReader()); - DirectoryReader reader = wrap(nonClosingReaderWrapper); - if (reader != nonClosingReaderWrapper) { - if (reader.getReaderCacheHelper() != elasticsearchDirectoryReader.getReaderCacheHelper()) { - throw new IllegalStateException("wrapped directory reader doesn't delegate IndexReader#getCoreCacheKey," + - " wrappers must override this method and delegate to the original readers core cache key. Wrapped readers can't be " + - "used as cache keys since their are used only per request which would lead to subtle bugs"); - } - if (ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(reader) != elasticsearchDirectoryReader) { - // prevent that somebody wraps with a non-filter reader - throw new IllegalStateException("wrapped directory reader hides actual ElasticsearchDirectoryReader but shouldn't"); - } - } - - final IndexSearcher origIndexSearcher = engineSearcher.searcher(); - final IndexSearcher innerIndexSearcher = new IndexSearcher(reader); - innerIndexSearcher.setQueryCache(origIndexSearcher.getQueryCache()); - innerIndexSearcher.setQueryCachingPolicy(origIndexSearcher.getQueryCachingPolicy()); - innerIndexSearcher.setSimilarity(origIndexSearcher.getSimilarity()); - // TODO: Right now IndexSearcher isn't wrapper friendly, when it becomes wrapper friendly we should revise this extension point - // For example if IndexSearcher#rewrite() is overwritten than also IndexSearcher#createNormalizedWeight needs to be overwritten - // This needs to be fixed before we can allow the IndexSearcher from Engine to be wrapped multiple times - final IndexSearcher indexSearcher = wrap(innerIndexSearcher); - if (reader == nonClosingReaderWrapper && indexSearcher == innerIndexSearcher) { - return engineSearcher; - } else { - // we close the reader to make sure wrappers can release resources if needed.... 
- // our NonClosingReaderWrapper makes sure that our reader is not closed - return new Engine.Searcher(engineSearcher.source(), indexSearcher, () -> - IOUtils.close(indexSearcher.getIndexReader(), // this will close the wrappers excluding the NonClosingReaderWrapper - engineSearcher)); // this will run the closeable on the wrapped engine searcher - } - } - - private static final class NonClosingReaderWrapper extends FilterDirectoryReader { - - private NonClosingReaderWrapper(DirectoryReader in) throws IOException { - super(in, new SubReaderWrapper() { - @Override - public LeafReader wrap(LeafReader reader) { - return reader; - } - }); - } - - @Override - protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException { - return new NonClosingReaderWrapper(in); - } - - @Override - protected void doClose() throws IOException { - // don't close here - mimic the MultiReader#doClose = false behavior that FilterDirectoryReader doesn't have - } - - @Override - public CacheHelper getReaderCacheHelper() { - return in.getReaderCacheHelper(); - } - - } - -} diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 7b4e06a451c..d0733017845 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -23,9 +23,13 @@ import com.carrotsearch.hppc.ObjectLongMap; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.CheckIndex; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.ReferenceManager; @@ -50,6 +54,7 @@ import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.Tuple; @@ -243,7 +248,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl private static final EnumSet writeAllowedStates = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED); - private final IndexSearcherWrapper searcherWrapper; + private final CheckedFunction readerWrapper; /** * True if this shard is still indexing (recently) and false if we've been idle for long enough (as periodically checked by {@link @@ -269,7 +274,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl final SimilarityService similarityService, final @Nullable EngineFactory engineFactory, final IndexEventListener indexEventListener, - final IndexSearcherWrapper indexSearcherWrapper, + final CheckedFunction indexReaderWrapper, final ThreadPool threadPool, final BigArrays bigArrays, final Engine.Warmer warmer, @@ -349,7 +354,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl cachingPolicy = new 
UsageTrackingQueryCachingPolicy();
         }
         indexShardOperationPermits = new IndexShardOperationPermits(shardId, threadPool);
-        searcherWrapper = indexSearcherWrapper;
+        readerWrapper = indexReaderWrapper;
         refreshListeners = buildRefreshListeners();
         lastSearcherAccess.set(threadPool.relativeTimeInMillis());
         persistMetadata(path, indexSettings, shardRouting, null, logger);
@@ -1230,7 +1235,7 @@
             != null : "DirectoryReader must be an instance or ElasticsearchDirectoryReader";
         boolean success = false;
         try {
-            final Engine.Searcher wrappedSearcher = searcherWrapper == null ? searcher : searcherWrapper.wrap(searcher);
+            final Engine.Searcher wrappedSearcher = readerWrapper == null ? searcher : wrapSearcher(searcher, readerWrapper);
             assert wrappedSearcher != null;
             success = true;
             return wrappedSearcher;
@@ -1243,6 +1248,72 @@
         }
     }
 
+    static Engine.Searcher wrapSearcher(Engine.Searcher engineSearcher,
+                                        CheckedFunction<DirectoryReader, DirectoryReader> readerWrapper) throws IOException {
+        assert readerWrapper != null;
+        final ElasticsearchDirectoryReader elasticsearchDirectoryReader =
+            ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(engineSearcher.getDirectoryReader());
+        if (elasticsearchDirectoryReader == null) {
+            throw new IllegalStateException("Can't wrap non elasticsearch directory reader");
+        }
+        NonClosingReaderWrapper nonClosingReaderWrapper = new NonClosingReaderWrapper(engineSearcher.getDirectoryReader());
+        DirectoryReader reader = readerWrapper.apply(nonClosingReaderWrapper);
+        if (reader != nonClosingReaderWrapper) {
+            if (reader.getReaderCacheHelper() != elasticsearchDirectoryReader.getReaderCacheHelper()) {
+                throw new IllegalStateException("wrapped directory reader doesn't delegate IndexReader#getCoreCacheKey," +
+                    " wrappers must override this method and delegate to the original reader's core cache key. Wrapped readers can't be " +
+                    "used as cache keys since they are used only per request which would lead to subtle bugs");
+            }
+            if (ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(reader) != elasticsearchDirectoryReader) {
+                // prevent somebody from wrapping with a non-filter reader
+                throw new IllegalStateException("wrapped directory reader hides actual ElasticsearchDirectoryReader but shouldn't");
+            }
+        }
+
+        if (reader == nonClosingReaderWrapper) {
+            return engineSearcher;
+        } else {
+            final IndexSearcher origIndexSearcher = engineSearcher.searcher();
+            final IndexSearcher newIndexSearcher = new IndexSearcher(reader);
+            newIndexSearcher.setQueryCache(origIndexSearcher.getQueryCache());
+            newIndexSearcher.setQueryCachingPolicy(origIndexSearcher.getQueryCachingPolicy());
+            newIndexSearcher.setSimilarity(origIndexSearcher.getSimilarity());
+            // we close the reader to make sure wrappers can release resources if needed....
+ // our NonClosingReaderWrapper makes sure that our reader is not closed + return new Engine.Searcher(engineSearcher.source(), newIndexSearcher, () -> + IOUtils.close(newIndexSearcher.getIndexReader(), // this will close the wrappers excluding the NonClosingReaderWrapper + engineSearcher)); // this will run the closeable on the wrapped engine searcher + } + } + + private static final class NonClosingReaderWrapper extends FilterDirectoryReader { + + private NonClosingReaderWrapper(DirectoryReader in) throws IOException { + super(in, new SubReaderWrapper() { + @Override + public LeafReader wrap(LeafReader reader) { + return reader; + } + }); + } + + @Override + protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException { + return new NonClosingReaderWrapper(in); + } + + @Override + protected void doClose() throws IOException { + // don't close here - mimic the MultiReader#doClose = false behavior that FilterDirectoryReader doesn't have + } + + @Override + public CacheHelper getReaderCacheHelper() { + return in.getReaderCacheHelper(); + } + + } + public void close(String reason, boolean flushEngine) throws IOException { synchronized (mutex) { try { diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index d9a7fdb831e..ee0690117d8 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -175,7 +175,8 @@ final class DefaultSearchContext extends SearchContext { this.indexShard = indexShard; this.indexService = indexService; this.clusterService = clusterService; - this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy()); + this.searcher = new ContextIndexSearcher(engineSearcher.reader(), engineSearcher.searcher().getSimilarity(), + indexService.cache().query(), indexShard.getQueryCachingPolicy()); this.relativeTimeSupplier = relativeTimeSupplier; this.timeout = timeout; this.minNodeVersion = minNodeVersion; diff --git a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java index 49c310ba706..794c72ec14c 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java @@ -20,14 +20,19 @@ package org.elasticsearch.search.internal; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermStates; import org.apache.lucene.search.BulkScorer; import org.apache.lucene.search.CollectionStatistics; +import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.Collector; +import org.apache.lucene.search.ConjunctionDISI; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; @@ -35,9 +40,13 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TermStatistics; 
import org.apache.lucene.search.Weight; -import org.apache.lucene.search.XIndexSearcher; +import org.apache.lucene.search.similarities.Similarity; +import org.apache.lucene.util.BitSet; +import org.apache.lucene.util.BitSetIterator; +import org.apache.lucene.util.Bits; +import org.apache.lucene.util.CombinedBitSet; +import org.apache.lucene.util.SparseFixedBitSet; import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.index.engine.Engine; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.profile.Timer; import org.elasticsearch.search.profile.query.ProfileWeight; @@ -46,6 +55,7 @@ import org.elasticsearch.search.profile.query.QueryProfiler; import org.elasticsearch.search.profile.query.QueryTimingType; import java.io.IOException; +import java.util.Arrays; import java.util.List; import java.util.Set; @@ -53,26 +63,19 @@ import java.util.Set; * Context-aware extension of {@link IndexSearcher}. */ public class ContextIndexSearcher extends IndexSearcher implements Releasable { - - /** The wrapped {@link IndexSearcher}. The reason why we sometimes prefer delegating to this searcher instead of {@code super} is that - * this instance may have more assertions, for example if it comes from MockInternalEngine which wraps the IndexSearcher into an - * AssertingIndexSearcher. */ - private final XIndexSearcher in; + /** + * The interval at which we check for search cancellation when we cannot use + * a {@link CancellableBulkScorer}. See {@link #intersectScorerAndBitSet}. + */ + private static int CHECK_CANCELLED_SCORER_INTERVAL = 1 << 11; private AggregatedDfs aggregatedDfs; - - private final Engine.Searcher engineSearcher; - - // TODO revisit moving the profiler to inheritance or wrapping model in the future private QueryProfiler profiler; - private Runnable checkCancelled; - public ContextIndexSearcher(Engine.Searcher searcher, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy) { - super(searcher.reader()); - engineSearcher = searcher; - in = new XIndexSearcher(searcher.searcher()); - setSimilarity(searcher.searcher().getSimilarity()); + public ContextIndexSearcher(IndexReader reader, Similarity similarity, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy) { + super(reader); + setSimilarity(similarity); setQueryCache(queryCache); setQueryCachingPolicy(queryCachingPolicy); } @@ -104,7 +107,7 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { } try { - return in.rewrite(original); + return super.rewrite(original); } finally { if (profiler != null) { profiler.stopAndAddRewriteTime(); @@ -130,7 +133,6 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { } return new ProfileWeight(query, weight, profile); } else { - // needs to be 'super', not 'in' in order to use aggregated DFS return super.createWeight(query, scoreMode, boost); } } @@ -158,7 +160,6 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { @Override public Scorer scorer(LeafReaderContext context) throws IOException { - // in case the wrapped searcher (in) uses the scorer directly return weight.scorer(context); } @@ -175,16 +176,75 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { } else { cancellableWeight = weight; } - in.search(leaves, cancellableWeight, collector); + searchInternal(leaves, cancellableWeight, collector); } - @Override - public Explanation explain(Query query, int doc) throws IOException { - if (aggregatedDfs != null) { - // 
dfs data is needed to explain the score - return super.explain(createWeight(rewrite(query), ScoreMode.COMPLETE, 1f), doc); + private void searchInternal(List leaves, Weight weight, Collector collector) throws IOException { + for (LeafReaderContext ctx : leaves) { // search each subreader + final LeafCollector leafCollector; + try { + leafCollector = collector.getLeafCollector(ctx); + } catch (CollectionTerminatedException e) { + // there is no doc of interest in this reader context + // continue with the following leaf + continue; + } + Bits liveDocs = ctx.reader().getLiveDocs(); + BitSet liveDocsBitSet = getSparseBitSetOrNull(ctx.reader().getLiveDocs()); + if (liveDocsBitSet == null) { + BulkScorer bulkScorer = weight.bulkScorer(ctx); + if (bulkScorer != null) { + try { + bulkScorer.score(leafCollector, liveDocs); + } catch (CollectionTerminatedException e) { + // collection was terminated prematurely + // continue with the following leaf + } + } + } else { + // if the role query result set is sparse then we should use the SparseFixedBitSet for advancing: + Scorer scorer = weight.scorer(ctx); + if (scorer != null) { + try { + intersectScorerAndBitSet(scorer, liveDocsBitSet, leafCollector, + checkCancelled == null ? () -> {} : checkCancelled); + } catch (CollectionTerminatedException e) { + // collection was terminated prematurely + // continue with the following leaf + } + } + } } - return in.explain(query, doc); + } + + private static BitSet getSparseBitSetOrNull(Bits liveDocs) { + if (liveDocs instanceof SparseFixedBitSet) { + return (BitSet) liveDocs; + } else if (liveDocs instanceof CombinedBitSet + // if the underlying role bitset is sparse + && ((CombinedBitSet) liveDocs).getFirst() instanceof SparseFixedBitSet) { + return (BitSet) liveDocs; + } else { + return null; + } + + } + + static void intersectScorerAndBitSet(Scorer scorer, BitSet acceptDocs, + LeafCollector collector, Runnable checkCancelled) throws IOException { + // ConjunctionDISI uses the DocIdSetIterator#cost() to order the iterators, so if roleBits has the lowest cardinality it should + // be used first: + DocIdSetIterator iterator = ConjunctionDISI.intersectIterators(Arrays.asList(new BitSetIterator(acceptDocs, + acceptDocs.approximateCardinality()), scorer.iterator())); + int seen = 0; + checkCancelled.run(); + for (int docId = iterator.nextDoc(); docId < DocIdSetIterator.NO_MORE_DOCS; docId = iterator.nextDoc()) { + if (++seen % CHECK_CANCELLED_SCORER_INTERVAL == 0) { + checkCancelled.run(); + } + collector.collect(docId); + } + checkCancelled.run(); } @Override @@ -216,10 +276,8 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { } public DirectoryReader getDirectoryReader() { - return engineSearcher.getDirectoryReader(); - } - - public Engine.Searcher getEngineSearcher() { - return engineSearcher; + final IndexReader reader = getIndexReader(); + assert reader instanceof DirectoryReader : "expected an instance of DirectoryReader, got " + reader.getClass(); + return (DirectoryReader) reader; } } diff --git a/server/src/test/java/org/apache/lucene/util/CombinedBitSetTests.java b/server/src/test/java/org/apache/lucene/util/CombinedBitSetTests.java new file mode 100644 index 00000000000..65165272f54 --- /dev/null +++ b/server/src/test/java/org/apache/lucene/util/CombinedBitSetTests.java @@ -0,0 +1,111 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.lucene.util; + +import org.apache.lucene.search.DocIdSetIterator; +import org.elasticsearch.test.ESTestCase; + +public class CombinedBitSetTests extends ESTestCase { + public void testEmpty() { + for (float percent : new float[] {0f, 0.1f, 0.5f, 0.9f, 1f}) { + testCase(randomIntBetween(1, 10000), 0f, percent); + testCase(randomIntBetween(1, 10000), percent, 0f); + } + } + + public void testSparse() { + for (float percent : new float[] {0f, 0.1f, 0.5f, 0.9f, 1f}) { + testCase(randomIntBetween(1, 10000), 0.1f, percent); + testCase(randomIntBetween(1, 10000), percent, 0.1f); + } + } + + public void testDense() { + for (float percent : new float[] {0f, 0.1f, 0.5f, 0.9f, 1f}) { + testCase(randomIntBetween(1, 10000), 0.9f, percent); + testCase(randomIntBetween(1, 10000), percent, 0.9f); + } + } + + public void testRandom() { + int iterations = atLeast(10); + for (int i = 0; i < iterations; i++) { + testCase(randomIntBetween(1, 10000), randomFloat(), randomFloat()); + } + } + + private void testCase(int numBits, float percent1, float percent2) { + BitSet first = randomSet(numBits, percent1); + BitSet second = randomSet(numBits, percent2); + CombinedBitSet actual = new CombinedBitSet(first, second); + FixedBitSet expected = new FixedBitSet(numBits); + or(expected, first); + and(expected, second); + assertEquals(expected.cardinality(), actual.cardinality()); + assertEquals(expected, actual, numBits); + for (int i = 0; i < numBits; ++i) { + assertEquals(expected.nextSetBit(i), actual.nextSetBit(i)); + assertEquals(Integer.toString(i), expected.prevSetBit(i), actual.prevSetBit(i)); + } + } + + private void or(BitSet set1, BitSet set2) { + int next = 0; + while (next < set2.length() && (next = set2.nextSetBit(next)) != DocIdSetIterator.NO_MORE_DOCS) { + set1.set(next); + next += 1; + } + } + + private void and(BitSet set1, BitSet set2) { + int next = 0; + while (next < set1.length() && (next = set1.nextSetBit(next)) != DocIdSetIterator.NO_MORE_DOCS) { + if (set2.get(next) == false) { + set1.clear(next); + } + next += 1; + } + } + + private void assertEquals(BitSet set1, BitSet set2, int maxDoc) { + for (int i = 0; i < maxDoc; ++i) { + assertEquals("Different at " + i, set1.get(i), set2.get(i)); + } + } + + private BitSet randomSet(int numBits, float percentSet) { + return randomSet(numBits, (int) (percentSet * numBits)); + } + + private BitSet randomSet(int numBits, int numBitsSet) { + assert numBitsSet <= numBits; + final BitSet set = randomBoolean() ? 
new SparseFixedBitSet(numBits) : new FixedBitSet(numBits); + for (int i = 0; i < numBitsSet; ++i) { + while (true) { + final int o = random().nextInt(numBits); + if (set.get(o) == false) { + set.set(o); + break; + } + } + } + return set; + } +} diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index d0f811007a6..d052fa365be 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -23,7 +23,6 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.FieldInvertState; import org.apache.lucene.index.Term; import org.apache.lucene.search.CollectionStatistics; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.Weight; @@ -34,6 +33,7 @@ import org.apache.lucene.util.SetOnce.AlreadySetException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -50,14 +50,12 @@ import org.elasticsearch.index.cache.query.DisabledQueryCache; import org.elasticsearch.index.cache.query.IndexQueryCache; import org.elasticsearch.index.cache.query.QueryCache; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.index.engine.InternalEngineTests; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.shard.IndexEventListener; -import org.elasticsearch.index.shard.IndexSearcherWrapper; import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.index.shard.ShardId; @@ -159,10 +157,10 @@ public class IndexModuleTests extends ESTestCase { public void testWrapperIsBound() throws IOException { final MockEngineFactory engineFactory = new MockEngineFactory(AssertingDirectoryReader.class); IndexModule module = new IndexModule(indexSettings, emptyAnalysisRegistry, engineFactory, Collections.emptyMap()); - module.setSearcherWrapper((s) -> new Wrapper()); + module.setReaderWrapper(s -> new Wrapper()); IndexService indexService = newIndexService(module); - assertTrue(indexService.getSearcherWrapper() instanceof Wrapper); + assertTrue(indexService.getReaderWrapper() instanceof Wrapper); assertSame(indexService.getEngineFactory(), module.getEngineFactory()); indexService.close("simon says", false); } @@ -321,7 +319,7 @@ public class IndexModuleTests extends ESTestCase { assertEquals(msg, expectThrows(IllegalStateException.class, () -> module.addIndexEventListener(null)).getMessage()); assertEquals(msg, expectThrows(IllegalStateException.class, () -> module.addIndexOperationListener(null)).getMessage()); assertEquals(msg, expectThrows(IllegalStateException.class, () -> module.addSimilarity(null, null)).getMessage()); - assertEquals(msg, expectThrows(IllegalStateException.class, () -> 
module.setSearcherWrapper(null)).getMessage()); + assertEquals(msg, expectThrows(IllegalStateException.class, () -> module.setReaderWrapper(null)).getMessage()); assertEquals(msg, expectThrows(IllegalStateException.class, () -> module.forceQueryCacheProvider(null)).getMessage()); } @@ -455,15 +453,9 @@ public class IndexModuleTests extends ESTestCase { } } - public static final class Wrapper extends IndexSearcherWrapper { - + public static final class Wrapper implements CheckedFunction { @Override - public DirectoryReader wrap(DirectoryReader reader) { - return null; - } - - @Override - public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { + public DirectoryReader apply(DirectoryReader reader) { return null; } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 38ade1ec746..09872b418cc 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -128,7 +128,6 @@ import org.elasticsearch.index.seqno.RetentionLease; import org.elasticsearch.index.seqno.RetentionLeases; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; -import org.elasticsearch.index.shard.IndexSearcherWrapper; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; import org.elasticsearch.index.shard.ShardUtils; @@ -723,36 +722,6 @@ public class InternalEngineTests extends EngineTestCase { } } - public void testIndexSearcherWrapper() throws Exception { - final AtomicInteger counter = new AtomicInteger(); - IndexSearcherWrapper wrapper = new IndexSearcherWrapper() { - - @Override - public DirectoryReader wrap(DirectoryReader reader) { - counter.incrementAndGet(); - return reader; - } - - @Override - public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { - counter.incrementAndGet(); - return searcher; - } - }; - Store store = createStore(); - Path translog = createTempDir("translog-test"); - InternalEngine engine = createEngine(store, translog); - engine.close(); - - engine = new InternalEngine(engine.config()); - assertTrue(engine.isRecovering()); - engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); - Engine.Searcher searcher = wrapper.wrap(engine.acquireSearcher("test")); - assertThat(counter.get(), equalTo(2)); - searcher.close(); - IOUtils.close(store, engine); - } - public void testFlushIsDisabledDuringTranslogRecovery() throws IOException { assertFalse(engine.isRecovering()); ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), SOURCE, null); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexSearcherWrapperTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexReaderWrapperTests.java similarity index 65% rename from server/src/test/java/org/elasticsearch/index/shard/IndexSearcherWrapperTests.java rename to server/src/test/java/org/elasticsearch/index/shard/IndexReaderWrapperTests.java index 7a422e82c22..6abcacc1581 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexSearcherWrapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexReaderWrapperTests.java @@ -28,32 +28,24 @@ import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import 
org.apache.lucene.index.LeafReader; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; -import org.apache.lucene.search.Collector; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TotalHitCountCollector; -import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.engine.EngineException; -import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.Collections; -import java.util.List; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import static org.hamcrest.Matchers.equalTo; - -public class IndexSearcherWrapperTests extends ESTestCase { +public class IndexReaderWrapperTests extends ESTestCase { public void testReaderCloseListenerIsCalled() throws IOException { Directory dir = newDirectory(); @@ -67,23 +59,13 @@ public class IndexSearcherWrapperTests extends ESTestCase { IndexSearcher searcher = new IndexSearcher(open); assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits.value); final AtomicInteger closeCalls = new AtomicInteger(0); - IndexSearcherWrapper wrapper = new IndexSearcherWrapper() { - @Override - public DirectoryReader wrap(DirectoryReader reader) throws IOException { - return new FieldMaskingReader("field", reader, closeCalls); - } - - @Override - public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { - return searcher; - } - - }; + CheckedFunction wrapper = + reader -> new FieldMaskingReader("field", reader, closeCalls); final int sourceRefCount = open.getRefCount(); final AtomicInteger count = new AtomicInteger(); final AtomicInteger outerCount = new AtomicInteger(); final AtomicBoolean closeCalled = new AtomicBoolean(false); - final Engine.Searcher wrap = wrapper.wrap(new Engine.Searcher("foo", searcher, () -> closeCalled.set(true))); + final Engine.Searcher wrap = IndexShard.wrapSearcher(new Engine.Searcher("foo", searcher, () -> closeCalled.set(true)), wrapper); assertEquals(1, wrap.reader().getRefCount()); ElasticsearchDirectoryReader.addReaderCloseListener(wrap.getDirectoryReader(), key -> { if (key == open.getReaderCacheHelper().getKey()) { @@ -118,20 +100,11 @@ public class IndexSearcherWrapperTests extends ESTestCase { assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits.value); searcher.setSimilarity(iwc.getSimilarity()); final AtomicInteger closeCalls = new AtomicInteger(0); - IndexSearcherWrapper wrapper = new IndexSearcherWrapper() { - @Override - public DirectoryReader wrap(DirectoryReader reader) throws IOException { - return new FieldMaskingReader("field", reader, closeCalls); - } - - @Override - public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { - return searcher; - } - }; + CheckedFunction wrapper = + reader -> new FieldMaskingReader("field", reader, closeCalls); final ConcurrentHashMap cache = new ConcurrentHashMap<>(); AtomicBoolean closeCalled = new AtomicBoolean(false); - try (Engine.Searcher wrap = wrapper.wrap(new Engine.Searcher("foo", 
searcher, () -> closeCalled.set(true)))) { + try (Engine.Searcher wrap = IndexShard.wrapSearcher(new Engine.Searcher("foo", searcher, () -> closeCalled.set(true)), wrapper)) { ElasticsearchDirectoryReader.addReaderCloseListener(wrap.getDirectoryReader(), key -> { cache.remove(key); }); @@ -159,58 +132,14 @@ public class IndexSearcherWrapperTests extends ESTestCase { IndexSearcher searcher = new IndexSearcher(open); assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits.value); searcher.setSimilarity(iwc.getSimilarity()); - IndexSearcherWrapper wrapper = new IndexSearcherWrapper(); + CheckedFunction wrapper = directoryReader -> directoryReader; try (Engine.Searcher engineSearcher = new Engine.Searcher("foo", searcher, open::close)) { - final Engine.Searcher wrap = wrapper.wrap(engineSearcher); + final Engine.Searcher wrap = IndexShard.wrapSearcher(engineSearcher, wrapper); assertSame(wrap, engineSearcher); } IOUtils.close(writer, dir); } - public void testWrapVisibility() throws IOException { - Directory dir = newDirectory(); - IndexWriterConfig iwc = newIndexWriterConfig(); - IndexWriter writer = new IndexWriter(dir, iwc); - Document doc = new Document(); - doc.add(new StringField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); - doc.add(new TextField("field", "doc", random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); - writer.addDocument(doc); - DirectoryReader open = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1)); - IndexSearcher searcher = new IndexSearcher(open); - assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits.value); - IndexSearcherWrapper wrapper = new IndexSearcherWrapper() { - @Override - public DirectoryReader wrap(DirectoryReader reader) throws IOException { - return reader; - } - - @Override - public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { - return new IndexSearcher(searcher.getIndexReader()) { - @Override - protected void search(List leaves, Weight weight, Collector collector) throws IOException { - throw new IllegalStateException("boum"); - } - }; - } - - }; - final AtomicBoolean closeCalled = new AtomicBoolean(false); - final Engine.Searcher wrap = wrapper.wrap(new Engine.Searcher("foo", searcher, () -> closeCalled.set(true))); - assertEquals(1, wrap.reader().getRefCount()); - ContextIndexSearcher contextSearcher = new ContextIndexSearcher(wrap, wrap.searcher().getQueryCache(), - wrap.searcher().getQueryCachingPolicy()); - IllegalStateException exc = expectThrows(IllegalStateException.class, - () -> contextSearcher.search(new TermQuery(new Term("field", "doc")), new TotalHitCountCollector())); - assertThat(exc.getMessage(), equalTo("boum")); - wrap.close(); - assertFalse("wrapped reader is closed", wrap.reader().tryIncRef()); - assertTrue(closeCalled.get()); - - IOUtils.close(open, writer, dir); - assertEquals(0, open.getRefCount()); - } - private static class FieldMaskingReader extends FilterDirectoryReader { private final String field; private final AtomicInteger closeCalls; diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index 929a5a093a0..b3b0c61aa53 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.index.shard; +import 
org.apache.lucene.index.DirectoryReader; import org.apache.lucene.store.LockObtainFailedException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; @@ -40,6 +41,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; @@ -528,7 +530,7 @@ public class IndexShardIT extends ESSingleNodeTestCase { client().prepareIndex("test", "test", "1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON) .setRefreshPolicy(IMMEDIATE).get(); - IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {}; + CheckedFunction wrapper = directoryReader -> directoryReader; shard.close("simon says", false); AtomicReference shardRef = new AtomicReference<>(); List failures = new ArrayList<>(); @@ -646,10 +648,10 @@ public class IndexShardIT extends ESSingleNodeTestCase { } public static final IndexShard newIndexShard( - final IndexService indexService, - final IndexShard shard,IndexSearcherWrapper wrapper, - final CircuitBreakerService cbs, - final IndexingOperationListener... listeners) throws IOException { + final IndexService indexService, + final IndexShard shard, CheckedFunction wrapper, + final CircuitBreakerService cbs, + final IndexingOperationListener... listeners) throws IOException { ShardRouting initializingShardRouting = getInitializingShardRouting(shard.routingEntry()); return new IndexShard( initializingShardRouting, diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 1f08107d6dc..572576736e3 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -23,7 +23,6 @@ import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.AlreadyClosedException; @@ -54,6 +53,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingHelper; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.breaker.CircuitBreaker; @@ -82,7 +82,6 @@ import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.DocIdSeqNoAndSource; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine.DeleteResult; -import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.engine.InternalEngineFactory; @@ -2368,7 +2367,7 @@ public class IndexShardTests extends IndexShardTestCase { closeShards(target); } - public void testSearcherWrapperIsUsed() throws IOException { + public void 
testReaderWrapperIsUsed() throws IOException { IndexShard shard = newStartedShard(true); indexDoc(shard, "_doc", "0", "{\"foo\" : \"bar\"}"); indexDoc(shard, "_doc", "1", "{\"foobar\" : \"bar\"}"); @@ -2386,17 +2385,7 @@ public class IndexShardTests extends IndexShardTestCase { search = searcher.searcher().search(new TermQuery(new Term("foobar", "bar")), 10); assertEquals(search.totalHits.value, 1); } - IndexSearcherWrapper wrapper = new IndexSearcherWrapper() { - @Override - public DirectoryReader wrap(DirectoryReader reader) throws IOException { - return new FieldMaskingReader("foo", reader); - } - - @Override - public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { - return searcher; - } - }; + CheckedFunction wrapper = reader -> new FieldMaskingReader("foo", reader); closeShards(shard); IndexShard newShard = newShard( ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.ExistingStoreRecoverySource.INSTANCE), @@ -2428,18 +2417,8 @@ public class IndexShardTests extends IndexShardTestCase { closeShards(newShard); } - public void testSearcherWrapperWorksWithGlobalOrdinals() throws IOException { - IndexSearcherWrapper wrapper = new IndexSearcherWrapper() { - @Override - public DirectoryReader wrap(DirectoryReader reader) throws IOException { - return new FieldMaskingReader("foo", reader); - } - - @Override - public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { - return searcher; - } - }; + public void testReaderWrapperWorksWithGlobalOrdinals() throws IOException { + CheckedFunction wrapper = reader -> new FieldMaskingReader("foo", reader); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) @@ -2540,16 +2519,8 @@ public class IndexShardTests extends IndexShardTestCase { IndexShard shard = newStartedShard(true); indexDoc(shard, "_doc", "0", "{\"foo\" : \"bar\"}"); shard.refresh("test"); - IndexSearcherWrapper wrapper = new IndexSearcherWrapper() { - @Override - public DirectoryReader wrap(DirectoryReader reader) throws IOException { - throw new RuntimeException("boom"); - } - - @Override - public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { - return searcher; - } + CheckedFunction wrapper = reader -> { + throw new RuntimeException("boom"); }; closeShards(shard); diff --git a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java index 6b4ddcf1ae6..a48196e0ff7 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java @@ -18,17 +18,18 @@ */ package org.elasticsearch.indices; +import org.apache.lucene.index.DirectoryReader; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.shard.IndexSearcherWrapper; import 
org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardIT; import org.elasticsearch.index.shard.IndexShardTestCase; @@ -426,7 +427,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { client().prepareIndex("test", "test", Integer.toString(i)).setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); } - IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {}; + CheckedFunction wrapper = directoryReader -> directoryReader; shard.close("simon says", false); AtomicReference shardRef = new AtomicReference<>(); Settings settings = Settings.builder().put("indices.memory.index_buffer_size", "50kb").build(); diff --git a/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java b/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java new file mode 100644 index 00000000000..ed2f972562c --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java @@ -0,0 +1,425 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
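The new ContextIndexSearcherTests added below exercise CombinedBitSet, which intersects the role-query bits with a segment's live docs. As a rough sketch of the semantics under test (illustration only, not Lucene's optimized implementation; it mirrors the anonymous Bits view this patch removes from the x-pack DocumentSubsetReader further down):

    import org.apache.lucene.util.BitSet;
    import org.apache.lucene.util.Bits;

    // A doc is visible only when it matches the role query AND is still live.
    // CombinedBitSet provides an optimized equivalent of this combined view.
    final class AndBitsSketch implements Bits {
        private final BitSet roleQueryBits;
        private final Bits liveDocs;

        AndBitsSketch(BitSet roleQueryBits, Bits liveDocs) {
            this.roleQueryBits = roleQueryBits;
            this.liveDocs = liveDocs;
        }

        @Override
        public boolean get(int index) {
            return roleQueryBits.get(index) && liveDocs.get(index);
        }

        @Override
        public int length() {
            return roleQueryBits.length();
        }
    }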
+ */ + +package org.elasticsearch.search.internal; + +import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.FilterDirectoryReader; +import org.apache.lucene.index.FilterLeafReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.BulkScorer; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.Weight; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.BitSet; +import org.apache.lucene.util.BitSetIterator; +import org.apache.lucene.util.Bits; +import org.apache.lucene.util.CombinedBitSet; +import org.apache.lucene.util.FixedBitSet; +import org.apache.lucene.util.SparseFixedBitSet; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.cache.bitset.BitsetFilterCache; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Collections; +import java.util.IdentityHashMap; +import java.util.Set; + +import static org.elasticsearch.search.internal.ContextIndexSearcher.intersectScorerAndBitSet; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class ContextIndexSearcherTests extends ESTestCase { + public void testIntersectScorerAndRoleBits() throws Exception { + final Directory directory = newDirectory(); + IndexWriter iw = new IndexWriter( + directory, + new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) + ); + + Document document = new Document(); + document.add(new StringField("field1", "value1", Field.Store.NO)); + document.add(new StringField("field2", "value1", Field.Store.NO)); + iw.addDocument(document); + + document = new Document(); + document.add(new StringField("field1", "value2", Field.Store.NO)); + document.add(new StringField("field2", "value1", Field.Store.NO)); + iw.addDocument(document); + + document = new Document(); + document.add(new StringField("field1", "value3", Field.Store.NO)); + document.add(new StringField("field2", "value1", Field.Store.NO)); + iw.addDocument(document); + + document = new Document(); + document.add(new StringField("field1", "value4", Field.Store.NO)); + document.add(new 
StringField("field2", "value1", Field.Store.NO)); + iw.addDocument(document); + + iw.commit(); + iw.deleteDocuments(new Term("field1", "value3")); + iw.close(); + DirectoryReader directoryReader = DirectoryReader.open(directory); + IndexSearcher searcher = new IndexSearcher(directoryReader); + Weight weight = searcher.createWeight(new TermQuery(new Term("field2", "value1")), + org.apache.lucene.search.ScoreMode.COMPLETE_NO_SCORES, 1f); + + LeafReaderContext leaf = directoryReader.leaves().get(0); + + CombinedBitSet bitSet = new CombinedBitSet(query(leaf, "field1", "value1"), leaf.reader().getLiveDocs()); + LeafCollector leafCollector = new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + assertThat(doc, equalTo(0)); + } + }; + intersectScorerAndBitSet(weight.scorer(leaf), bitSet, leafCollector, () -> {}); + + bitSet = new CombinedBitSet(query(leaf, "field1", "value2"), leaf.reader().getLiveDocs()); + leafCollector = new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + assertThat(doc, equalTo(1)); + } + }; + intersectScorerAndBitSet(weight.scorer(leaf), bitSet, leafCollector, () -> {}); + + + bitSet = new CombinedBitSet(query(leaf, "field1", "value3"), leaf.reader().getLiveDocs()); + leafCollector = new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + fail("docId [" + doc + "] should have been deleted"); + } + }; + intersectScorerAndBitSet(weight.scorer(leaf), bitSet, leafCollector, () -> {}); + + bitSet = new CombinedBitSet(query(leaf, "field1", "value4"), leaf.reader().getLiveDocs()); + leafCollector = new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + assertThat(doc, equalTo(3)); + } + }; + intersectScorerAndBitSet(weight.scorer(leaf), bitSet, leafCollector, () -> {}); + + directoryReader.close(); + directory.close(); + } + + public void testContextIndexSearcherSparseNoDeletions() throws IOException { + doTestContextIndexSearcher(true, false); + } + + public void testContextIndexSearcherDenseNoDeletions() throws IOException { + doTestContextIndexSearcher(false, false); + } + + public void testContextIndexSearcherSparseWithDeletions() throws IOException { + doTestContextIndexSearcher(true, true); + } + + public void testContextIndexSearcherDenseWithDeletions() throws IOException { + doTestContextIndexSearcher(false, true); + } + + public void doTestContextIndexSearcher(boolean sparse, boolean deletions) throws IOException { + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(null)); + Document doc = new Document(); + StringField allowedField = new StringField("allowed", "yes", Field.Store.NO); + doc.add(allowedField); + StringField fooField = new StringField("foo", "bar", Field.Store.NO); + doc.add(fooField); + StringField deleteField = new StringField("delete", "no", Field.Store.NO); + doc.add(deleteField); + w.addDocument(doc); + if (deletions) { + // add a document that matches foo:bar but will be deleted + deleteField.setStringValue("yes"); + w.addDocument(doc); + deleteField.setStringValue("no"); + } + allowedField.setStringValue("no"); + w.addDocument(doc); + if (sparse) { + for (int i = 0; i < 1000; ++i) { + w.addDocument(doc); + } + w.forceMerge(1); + } + w.deleteDocuments(new Term("delete", "yes")); + + IndexSettings settings = IndexSettingsModule.newIndexSettings("_index", Settings.EMPTY); + BitsetFilterCache.Listener 
listener = new BitsetFilterCache.Listener() { + @Override + public void onCache(ShardId shardId, Accountable accountable) { + + } + + @Override + public void onRemoval(ShardId shardId, Accountable accountable) { + + } + }; + DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(w), + new ShardId(settings.getIndex(), 0)); + BitsetFilterCache cache = new BitsetFilterCache(settings, listener); + Query roleQuery = new TermQuery(new Term("allowed", "yes")); + BitSet bitSet = cache.getBitSetProducer(roleQuery).getBitSet(reader.leaves().get(0)); + if (sparse) { + assertThat(bitSet, instanceOf(SparseFixedBitSet.class)); + } else { + assertThat(bitSet, instanceOf(FixedBitSet.class)); + } + + DocumentSubsetDirectoryReader filteredReader = new DocumentSubsetDirectoryReader(reader, cache, roleQuery); + + ContextIndexSearcher searcher = new ContextIndexSearcher(filteredReader, IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy()); + searcher.setCheckCancelled(() -> {}); + + // Searching a non-existing term will trigger a null scorer + assertEquals(0, searcher.count(new TermQuery(new Term("non_existing_field", "non_existing_value")))); + + assertEquals(1, searcher.count(new TermQuery(new Term("foo", "bar")))); + + // make sure scorers are created only once, see #1725 + assertEquals(1, searcher.count(new CreateScorerOnceQuery(new MatchAllDocsQuery()))); + IOUtils.close(reader, w, dir); + } + + private SparseFixedBitSet query(LeafReaderContext leaf, String field, String value) throws IOException { + SparseFixedBitSet sparseFixedBitSet = new SparseFixedBitSet(leaf.reader().maxDoc()); + TermsEnum tenum = leaf.reader().terms(field).iterator(); + while (tenum.next().utf8ToString().equals(value) == false) { + } + PostingsEnum penum = tenum.postings(null); + sparseFixedBitSet.or(penum); + return sparseFixedBitSet; + } + + private static class DocumentSubsetDirectoryReader extends FilterDirectoryReader { + private final BitsetFilterCache bitsetFilterCache; + private final Query roleQuery; + + DocumentSubsetDirectoryReader(DirectoryReader in, BitsetFilterCache bitsetFilterCache, Query roleQuery) throws IOException { + super(in, new SubReaderWrapper() { + @Override + public LeafReader wrap(LeafReader reader) { + try { + return new DocumentSubsetReader(reader, bitsetFilterCache, roleQuery); + } catch (Exception e) { + throw ExceptionsHelper.convertToElastic(e); + } + } + }); + this.bitsetFilterCache = bitsetFilterCache; + this.roleQuery = roleQuery; + } + + @Override + protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException { + return new DocumentSubsetDirectoryReader(in, bitsetFilterCache, roleQuery); + } + + + @Override + public CacheHelper getReaderCacheHelper() { + return in.getReaderCacheHelper(); + } + } + + private static class DocumentSubsetReader extends FilterLeafReader { + private final BitSet roleQueryBits; + private final int numDocs; + + /** + *

<p>Construct a FilterLeafReader based on the specified base reader. + *
<p>Note that base reader is closed if this FilterLeafReader is closed.</p>

+ * + * @param in specified base reader. + */ + DocumentSubsetReader(LeafReader in, BitsetFilterCache bitsetFilterCache, Query roleQuery) throws IOException { + super(in); + this.roleQueryBits = bitsetFilterCache.getBitSetProducer(roleQuery).getBitSet(in.getContext()); + this.numDocs = computeNumDocs(in, roleQueryBits); + } + + @Override + public CacheHelper getCoreCacheHelper() { + return in.getCoreCacheHelper(); + } + + @Override + public CacheHelper getReaderCacheHelper() { + // Not delegated since we change the live docs + return null; + } + + @Override + public int numDocs() { + return numDocs; + } + + @Override + public Bits getLiveDocs() { + final Bits actualLiveDocs = in.getLiveDocs(); + if (roleQueryBits == null) { + return new Bits.MatchNoBits(in.maxDoc()); + } else if (actualLiveDocs == null) { + return roleQueryBits; + } else { + // apply deletes when needed: + return new CombinedBitSet(roleQueryBits, actualLiveDocs); + } + } + + private static int computeNumDocs(LeafReader reader, BitSet roleQueryBits) { + final Bits liveDocs = reader.getLiveDocs(); + if (roleQueryBits == null) { + return 0; + } else if (liveDocs == null) { + // slow + return roleQueryBits.cardinality(); + } else { + // very slow, but necessary in order to be correct + int numDocs = 0; + DocIdSetIterator it = new BitSetIterator(roleQueryBits, 0L); // we don't use the cost + try { + for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) { + if (liveDocs.get(doc)) { + numDocs++; + } + } + return numDocs; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + } + } + + private static class CreateScorerOnceWeight extends Weight { + + private final Weight weight; + private final Set seenLeaves = Collections.newSetFromMap(new IdentityHashMap<>()); + + CreateScorerOnceWeight(Weight weight) { + super(weight.getQuery()); + this.weight = weight; + } + + @Override + public void extractTerms(Set terms) { + weight.extractTerms(terms); + } + + @Override + public Explanation explain(LeafReaderContext context, int doc) throws IOException { + return weight.explain(context, doc); + } + + @Override + public Scorer scorer(LeafReaderContext context) throws IOException { + assertTrue(seenLeaves.add(context.reader().getCoreCacheHelper().getKey())); + return weight.scorer(context); + } + + @Override + public BulkScorer bulkScorer(LeafReaderContext context) + throws IOException { + assertTrue(seenLeaves.add(context.reader().getCoreCacheHelper().getKey())); + return weight.bulkScorer(context); + } + + @Override + public boolean isCacheable(LeafReaderContext ctx) { + return true; + } + } + + private static class CreateScorerOnceQuery extends Query { + + private final Query query; + + CreateScorerOnceQuery(Query query) { + this.query = query; + } + + @Override + public String toString(String field) { + return query.toString(field); + } + + @Override + public Query rewrite(IndexReader reader) throws IOException { + Query queryRewritten = query.rewrite(reader); + if (query != queryRewritten) { + return new CreateScorerOnceQuery(queryRewritten); + } + return super.rewrite(reader); + } + + @Override + public Weight createWeight(IndexSearcher searcher, org.apache.lucene.search.ScoreMode scoreMode, float boost) throws IOException { + return new CreateScorerOnceWeight(query.createWeight(searcher, scoreMode, boost)); + } + + @Override + public boolean equals(Object obj) { + return sameClassAs(obj) && query.equals(((CreateScorerOnceQuery) obj).query); + } + + @Override + public int hashCode() { 
+ return 31 * classHash() + query.hashCode(); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java index 275349e15cd..b29d3ba3b7d 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java @@ -44,7 +44,6 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.TestUtil; -import org.elasticsearch.index.engine.Engine; import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.search.profile.ProfileResult; import org.elasticsearch.test.ESTestCase; @@ -81,8 +80,8 @@ public class QueryProfilerTests extends ESTestCase { } reader = w.getReader(); w.close(); - Engine.Searcher engineSearcher = new Engine.Searcher("test", new IndexSearcher(reader), null); - searcher = new ContextIndexSearcher(engineSearcher, IndexSearcher.getDefaultQueryCache(), MAYBE_CACHE_POLICY); + searcher = new ContextIndexSearcher(reader, IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), MAYBE_CACHE_POLICY); } @AfterClass @@ -159,10 +158,10 @@ public class QueryProfilerTests extends ESTestCase { public void testApproximations() throws IOException { QueryProfiler profiler = new QueryProfiler(); - Engine.Searcher engineSearcher = new Engine.Searcher("test", new IndexSearcher(reader), reader::close); // disable query caching since we want to test approximations, which won't // be exposed on a cached entry - ContextIndexSearcher searcher = new ContextIndexSearcher(engineSearcher, null, MAYBE_CACHE_POLICY); + ContextIndexSearcher searcher = new ContextIndexSearcher(reader, IndexSearcher.getDefaultSimilarity(), + null, MAYBE_CACHE_POLICY); searcher.setProfiler(profiler); Query query = new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random()); searcher.count(query); diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 4b5be292057..32607fa607e 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.index.shard; +import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexNotFoundException; import org.apache.lucene.store.Directory; import org.elasticsearch.Version; @@ -278,8 +279,9 @@ public abstract class IndexShardTestCase extends ESTestCase { * (ready to recover from another shard) */ protected IndexShard newShard(ShardId shardId, boolean primary, String nodeId, IndexMetaData indexMetaData, - @Nullable IndexSearcherWrapper searcherWrapper) throws IOException { - return newShard(shardId, primary, nodeId, indexMetaData, searcherWrapper, () -> {}); + @Nullable CheckedFunction readerWrapper) + throws IOException { + return newShard(shardId, primary, nodeId, indexMetaData, readerWrapper, () -> {}); } /** @@ -291,11 +293,12 @@ public abstract class IndexShardTestCase extends ESTestCase { * (ready to recover from another shard) */ protected IndexShard newShard(ShardId shardId, boolean primary, String nodeId, IndexMetaData indexMetaData, - @Nullable 
IndexSearcherWrapper searcherWrapper, Runnable globalCheckpointSyncer) throws IOException { + @Nullable CheckedFunction readerWrapper, + Runnable globalCheckpointSyncer) throws IOException { ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, nodeId, primary, ShardRoutingState.INITIALIZING, primary ? RecoverySource.EmptyStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE); return newShard( - shardRouting, indexMetaData, searcherWrapper, new InternalEngineFactory(), globalCheckpointSyncer, RetentionLeaseSyncer.EMPTY); + shardRouting, indexMetaData, readerWrapper, new InternalEngineFactory(), globalCheckpointSyncer, RetentionLeaseSyncer.EMPTY); } /** @@ -306,10 +309,11 @@ public abstract class IndexShardTestCase extends ESTestCase { * @param indexMetaData indexMetaData for the shard, including any mapping * @param listeners an optional set of listeners to add to the shard */ - protected IndexShard newShard( - ShardRouting routing, IndexMetaData indexMetaData, EngineFactory engineFactory, IndexingOperationListener... listeners) + protected IndexShard newShard(ShardRouting routing, IndexMetaData indexMetaData, + @Nullable CheckedFunction indexReaderWrapper, + EngineFactory engineFactory, IndexingOperationListener... listeners) throws IOException { - return newShard(routing, indexMetaData, null, engineFactory, () -> {}, RetentionLeaseSyncer.EMPTY, listeners); + return newShard(routing, indexMetaData, indexReaderWrapper, engineFactory, () -> {}, RetentionLeaseSyncer.EMPTY, listeners); } /** @@ -317,22 +321,20 @@ public abstract class IndexShardTestCase extends ESTestCase { * current node id the shard is assigned to. * @param routing shard routing to use * @param indexMetaData indexMetaData for the shard, including any mapping - * @param indexSearcherWrapper an optional wrapper to be used during searchers + * @param indexReaderWrapper an optional wrapper to be used during search * @param globalCheckpointSyncer callback for syncing global checkpoints * @param listeners an optional set of listeners to add to the shard */ protected IndexShard newShard(ShardRouting routing, IndexMetaData indexMetaData, - @Nullable IndexSearcherWrapper indexSearcherWrapper, - @Nullable EngineFactory engineFactory, - Runnable globalCheckpointSyncer, - RetentionLeaseSyncer retentionLeaseSyncer, - IndexingOperationListener... listeners) + @Nullable CheckedFunction indexReaderWrapper, + @Nullable EngineFactory engineFactory, Runnable globalCheckpointSyncer, RetentionLeaseSyncer retentionLeaseSyncer, + IndexingOperationListener... listeners) throws IOException { // add node id as name to settings for proper logging final ShardId shardId = routing.shardId(); final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir()); ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId); - return newShard(routing, shardPath, indexMetaData, null, indexSearcherWrapper, engineFactory, globalCheckpointSyncer, + return newShard(routing, shardPath, indexMetaData, null, indexReaderWrapper, engineFactory, globalCheckpointSyncer, retentionLeaseSyncer, EMPTY_EVENT_LISTENER, listeners); } @@ -342,14 +344,14 @@ public abstract class IndexShardTestCase extends ESTestCase { * @param shardPath path to use for shard data * @param indexMetaData indexMetaData for the shard, including any mapping * @param storeProvider an optional custom store provider to use. 
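Where tests previously subclassed IndexSearcherWrapper, they now hand newShard a plain reader-mapping function. A minimal sketch of the shape used throughout this patch (the lambda body is taken from the test changes; the holder class is illustrative):

    import java.io.IOException;

    import org.apache.lucene.index.DirectoryReader;
    import org.elasticsearch.common.CheckedFunction;

    class ReaderWrapperExamples {
        // Identity wrapper, the replacement for the old no-op `new IndexSearcherWrapper() {}`.
        static final CheckedFunction<DirectoryReader, DirectoryReader, IOException> IDENTITY =
            directoryReader -> directoryReader;

        // Tests that need to hide a field wrap the reader instead, e.g.
        // reader -> new FieldMaskingReader("foo", reader) in IndexShardTests.
    }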
If null a default file based store will be created - * @param indexSearcherWrapper an optional wrapper to be used during searchers + * @param indexReaderWrapper an optional wrapper to be used during search * @param globalCheckpointSyncer callback for syncing global checkpoints * @param indexEventListener index event listener * @param listeners an optional set of listeners to add to the shard */ protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMetaData indexMetaData, @Nullable CheckedFunction storeProvider, - @Nullable IndexSearcherWrapper indexSearcherWrapper, + @Nullable CheckedFunction indexReaderWrapper, @Nullable EngineFactory engineFactory, Runnable globalCheckpointSyncer, RetentionLeaseSyncer retentionLeaseSyncer, IndexEventListener indexEventListener, IndexingOperationListener... listeners) throws IOException { @@ -382,7 +384,7 @@ public abstract class IndexShardTestCase extends ESTestCase { similarityService, engineFactory, indexEventListener, - indexSearcherWrapper, + indexReaderWrapper, threadPool, BigArrays.NON_RECYCLING_INSTANCE, warmer, diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index eb9b4d32423..0c197103e4b 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -32,7 +32,6 @@ import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; -import org.apache.lucene.search.XIndexSearcher; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.lease.Releasable; @@ -47,7 +46,6 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.cache.bitset.BitsetFilterCache.Listener; import org.elasticsearch.index.cache.query.DisabledQueryCache; -import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexFieldDataService; @@ -240,7 +238,6 @@ public abstract class AggregatorTestCase extends ESTestCase { } protected SearchContext createSearchContext(IndexSearcher indexSearcher, IndexSettings indexSettings) { - Engine.Searcher searcher = new Engine.Searcher("aggregator_test", indexSearcher, () -> indexSearcher.getIndexReader().close()); QueryCache queryCache = new DisabledQueryCache(indexSettings); QueryCachingPolicy queryCachingPolicy = new QueryCachingPolicy() { @Override @@ -253,7 +250,8 @@ public abstract class AggregatorTestCase extends ESTestCase { return false; } }; - ContextIndexSearcher contextIndexSearcher = new ContextIndexSearcher(searcher, queryCache, queryCachingPolicy); + ContextIndexSearcher contextIndexSearcher = new ContextIndexSearcher(indexSearcher.getIndexReader(), + indexSearcher.getSimilarity(), queryCache, queryCachingPolicy); SearchContext searchContext = mock(SearchContext.class); when(searchContext.numberOfShards()).thenReturn(1); @@ -464,16 +462,8 @@ public abstract class AggregatorTestCase extends ESTestCase { */ protected static IndexSearcher newIndexSearcher(IndexReader indexReader) { if (randomBoolean()) { - final 
IndexSearcher delegate = new IndexSearcher(indexReader); - final XIndexSearcher wrappedSearcher = new XIndexSearcher(delegate); // this executes basic query checks and asserts that weights are normalized only once etc. - return new AssertingIndexSearcher(random(), indexReader) { - @Override - protected void search(List leaves, Weight weight, Collector collector) throws IOException { - // we cannot use the asserting searcher because the weight is created by the ContextIndexSearcher - wrappedSearcher.search(leaves, weight, collector); - } - }; + return new AssertingIndexSearcher(random(), indexReader); } else { return new IndexSearcher(indexReader); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java index b3a6fe84908..52b086db338 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java @@ -24,14 +24,9 @@ import org.apache.lucene.index.AssertingDirectoryReader; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.AssertingIndexSearcher; -import org.apache.lucene.search.Collector; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; -import org.apache.lucene.search.Weight; -import org.apache.lucene.search.XIndexSearcher; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.settings.Setting; @@ -47,7 +42,6 @@ import java.io.Closeable; import java.io.IOException; import java.lang.reflect.Constructor; import java.util.IdentityHashMap; -import java.util.List; import java.util.Random; import java.util.concurrent.atomic.AtomicBoolean; @@ -151,19 +145,8 @@ public final class MockEngineSupport { if (reader instanceof DirectoryReader && mockContext.wrapReader) { wrappedReader = wrapReader((DirectoryReader) reader); } - final IndexSearcher delegate = new IndexSearcher(wrappedReader); - delegate.setSimilarity(searcher.searcher().getSimilarity()); - delegate.setQueryCache(filterCache); - delegate.setQueryCachingPolicy(filterCachingPolicy); - final XIndexSearcher wrappedSearcher = new XIndexSearcher(delegate); // this executes basic query checks and asserts that weights are normalized only once etc. 
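With weight creation now handled by ContextIndexSearcher itself, the XIndexSearcher delegation removed here becomes unnecessary and a plain AssertingIndexSearcher suffices. For reference, a minimal sketch of the new direct construction, matching the constructor calls visible in the QueryProfilerTests hunk above (the helper class is illustrative):

    import java.io.IOException;

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.search.IndexSearcher;
    import org.elasticsearch.search.internal.ContextIndexSearcher;

    class ContextIndexSearcherExample {
        static ContextIndexSearcher newSearcher(DirectoryReader reader) throws IOException {
            // A reader plus explicit similarity and query-cache settings replace
            // the old Engine.Searcher-based constructor.
            return new ContextIndexSearcher(reader,
                IndexSearcher.getDefaultSimilarity(),
                IndexSearcher.getDefaultQueryCache(),
                IndexSearcher.getDefaultQueryCachingPolicy());
        }
    }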
- final AssertingIndexSearcher assertingIndexSearcher = new AssertingIndexSearcher(mockContext.random, wrappedReader) { - @Override - protected void search(List leaves, Weight weight, Collector collector) throws IOException { - // we cannot use the asserting searcher because the weight is created by the ContextIndexSearcher - wrappedSearcher.search(leaves, weight, collector); - } - }; + final AssertingIndexSearcher assertingIndexSearcher = new AssertingIndexSearcher(mockContext.random, wrappedReader); assertingIndexSearcher.setSimilarity(searcher.searcher().getSimilarity()); assertingIndexSearcher.setQueryCache(filterCache); assertingIndexSearcher.setQueryCachingPolicy(filterCachingPolicy); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java index 631bd0b9ef9..412d64803a0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java @@ -272,8 +272,8 @@ public final class FrozenEngine extends ReadOnlyEngine { @Override public void validateSearchContext(SearchContext context, TransportRequest transportRequest) { - Searcher engineSearcher = context.searcher().getEngineSearcher(); - LazyDirectoryReader lazyDirectoryReader = unwrapLazyReader(engineSearcher.getDirectoryReader()); + DirectoryReader dirReader = context.searcher().getDirectoryReader(); + LazyDirectoryReader lazyDirectoryReader = unwrapLazyReader(dirReader); if (lazyDirectoryReader != null) { try { lazyDirectoryReader.reset(); @@ -297,8 +297,8 @@ public final class FrozenEngine extends ReadOnlyEngine { @Override public void onNewContext(SearchContext context) { - Searcher engineSearcher = context.searcher().getEngineSearcher(); - LazyDirectoryReader lazyDirectoryReader = unwrapLazyReader(engineSearcher.getDirectoryReader()); + DirectoryReader dirReader = context.searcher().getDirectoryReader(); + LazyDirectoryReader lazyDirectoryReader = unwrapLazyReader(dirReader); if (lazyDirectoryReader != null) { registerRelease(context, lazyDirectoryReader); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReader.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReader.java index c7d84b3c40f..af84315abf4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReader.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReader.java @@ -16,6 +16,7 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BitSetIterator; import org.apache.lucene.util.Bits; +import org.apache.lucene.util.CombinedBitSet; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.cache.Cache; import org.elasticsearch.common.cache.CacheBuilder; @@ -172,18 +173,7 @@ public final class DocumentSubsetReader extends FilterLeafReader { return roleQueryBits; } else { // apply deletes when needed: - return new Bits() { - - @Override - public boolean get(int index) { - return roleQueryBits.get(index) && actualLiveDocs.get(index); - } - - @Override - public int length() { - return roleQueryBits.length(); - } - }; + return new CombinedBitSet(roleQueryBits, actualLiveDocs); } } @@ -208,13 +198,4 @@ 
public final class DocumentSubsetReader extends FilterLeafReader { // Not delegated since we change the live docs return null; } - - BitSet getRoleQueryBits() { - return roleQueryBits; - } - - Bits getWrappedLiveDocs() { - return in.getLiveDocs(); - } - } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapper.java new file mode 100644 index 00000000000..6ea8ae84e11 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapper.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authz.accesscontrol; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.ConstantScoreQuery; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.logging.LoggerMessageFormat; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.cache.bitset.BitsetFilterCache; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; +import org.elasticsearch.xpack.core.security.authz.permission.DocumentPermissions; +import org.elasticsearch.xpack.core.security.support.Exceptions; +import org.elasticsearch.xpack.core.security.user.User; + +import java.io.IOException; +import java.util.function.Function; + +/** + * An IndexReader wrapper implementation that is used for field and document level security. + *

<p>
+ * Based on the {@link ThreadContext} this class will enable field and/or document level security.
+ * <p>
+ * Field level security is enabled by wrapping the original {@link DirectoryReader} in a {@link FieldSubsetReader}
+ * in the {@link #apply(DirectoryReader)} method.
+ * <p>

+ * Document level security is enabled by wrapping the original {@link DirectoryReader} in a {@link DocumentSubsetReader} + * instance. + */ +public class SecurityIndexReaderWrapper implements CheckedFunction { + private static final Logger logger = LogManager.getLogger(SecurityIndexReaderWrapper.class); + + private final Function queryShardContextProvider; + private final BitsetFilterCache bitsetFilterCache; + private final XPackLicenseState licenseState; + private final ThreadContext threadContext; + private final ScriptService scriptService; + + public SecurityIndexReaderWrapper(Function queryShardContextProvider, + BitsetFilterCache bitsetFilterCache, ThreadContext threadContext, XPackLicenseState licenseState, + ScriptService scriptService) { + this.scriptService = scriptService; + this.queryShardContextProvider = queryShardContextProvider; + this.bitsetFilterCache = bitsetFilterCache; + this.threadContext = threadContext; + this.licenseState = licenseState; + } + + @Override + public DirectoryReader apply(final DirectoryReader reader) { + if (licenseState.isDocumentAndFieldLevelSecurityAllowed() == false) { + return reader; + } + + try { + final IndicesAccessControl indicesAccessControl = getIndicesAccessControl(); + + ShardId shardId = ShardUtils.extractShardId(reader); + if (shardId == null) { + throw new IllegalStateException(LoggerMessageFormat.format("couldn't extract shardId from reader [{}]", reader)); + } + + final IndicesAccessControl.IndexAccessControl permissions = indicesAccessControl.getIndexPermissions(shardId.getIndexName()); + // No permissions have been defined for an index, so don't intercept the index reader for access control + if (permissions == null) { + return reader; + } + + DirectoryReader wrappedReader = reader; + DocumentPermissions documentPermissions = permissions.getDocumentPermissions(); + if (documentPermissions != null && documentPermissions.hasDocumentLevelPermissions()) { + BooleanQuery filterQuery = documentPermissions.filter(getUser(), scriptService, shardId, queryShardContextProvider); + if (filterQuery != null) { + wrappedReader = DocumentSubsetReader.wrap(wrappedReader, bitsetFilterCache, new ConstantScoreQuery(filterQuery)); + } + } + + return permissions.getFieldPermissions().filter(wrappedReader); + } catch (IOException e) { + logger.error("Unable to apply field level security"); + throw ExceptionsHelper.convertToElastic(e); + } + } + + protected IndicesAccessControl getIndicesAccessControl() { + IndicesAccessControl indicesAccessControl = threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); + if (indicesAccessControl == null) { + throw Exceptions.authorizationError("no indices permissions found"); + } + return indicesAccessControl; + } + + protected User getUser(){ + Authentication authentication = Authentication.getAuthentication(threadContext); + return authentication.getUser(); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java deleted file mode 100644 index 6608e5a64c8..00000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java +++ /dev/null @@ -1,217 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
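Consumers of the new wrapper apply security in a single step, as the integration tests further below do. A minimal usage sketch (the holder class and method are illustrative; the ContextIndexSearcher arguments match those used in the tests):

    import java.io.IOException;

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.MatchAllDocsQuery;
    import org.elasticsearch.search.internal.ContextIndexSearcher;
    import org.elasticsearch.xpack.core.security.authz.accesscontrol.SecurityIndexReaderWrapper;

    class SecurityWrapperUsageSketch {
        static int countVisibleDocs(SecurityIndexReaderWrapper wrapper, DirectoryReader reader) throws IOException {
            // One call applies DLS/FLS; the old second step, wrap(IndexSearcher), is gone.
            DirectoryReader wrapped = wrapper.apply(reader);
            IndexSearcher searcher = new ContextIndexSearcher(wrapped,
                IndexSearcher.getDefaultSimilarity(),
                IndexSearcher.getDefaultQueryCache(),
                IndexSearcher.getDefaultQueryCachingPolicy());
            return searcher.count(new MatchAllDocsQuery());
        }
    }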
Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.core.security.authz.accesscontrol; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.BulkScorer; -import org.apache.lucene.search.CollectionTerminatedException; -import org.apache.lucene.search.Collector; -import org.apache.lucene.search.ConjunctionDISI; -import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.LeafCollector; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.Weight; -import org.apache.lucene.util.BitSet; -import org.apache.lucene.util.BitSetIterator; -import org.apache.lucene.util.Bits; -import org.apache.lucene.util.SparseFixedBitSet; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.common.logging.LoggerMessageFormat; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.engine.EngineException; -import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.shard.IndexSearcherWrapper; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardUtils; -import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.xpack.core.security.authc.Authentication; -import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; -import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetReader.DocumentSubsetDirectoryReader; -import org.elasticsearch.xpack.core.security.authz.permission.DocumentPermissions; -import org.elasticsearch.xpack.core.security.support.Exceptions; -import org.elasticsearch.xpack.core.security.user.User; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import java.util.function.Function; - -/** - * An {@link IndexSearcherWrapper} implementation that is used for field and document level security. - *

<p>
- * Based on the {@link ThreadContext} this class will enable field and/or document level security.
- * <p>
- * Field level security is enabled by wrapping the original {@link DirectoryReader} in a {@link FieldSubsetReader}
- * in the {@link #wrap(DirectoryReader)} method.
- * <p>

- * Document level security is enabled by wrapping the original {@link DirectoryReader} in a {@link DocumentSubsetReader} - * instance. - */ -public class SecurityIndexSearcherWrapper extends IndexSearcherWrapper { - private static final Logger logger = LogManager.getLogger(SecurityIndexSearcherWrapper.class); - - private final Function queryShardContextProvider; - private final BitsetFilterCache bitsetFilterCache; - private final XPackLicenseState licenseState; - private final ThreadContext threadContext; - private final ScriptService scriptService; - - public SecurityIndexSearcherWrapper(Function queryShardContextProvider, - BitsetFilterCache bitsetFilterCache, ThreadContext threadContext, XPackLicenseState licenseState, - ScriptService scriptService) { - this.scriptService = scriptService; - this.queryShardContextProvider = queryShardContextProvider; - this.bitsetFilterCache = bitsetFilterCache; - this.threadContext = threadContext; - this.licenseState = licenseState; - } - - @Override - protected DirectoryReader wrap(final DirectoryReader reader) { - if (licenseState.isDocumentAndFieldLevelSecurityAllowed() == false) { - return reader; - } - - try { - final IndicesAccessControl indicesAccessControl = getIndicesAccessControl(); - - ShardId shardId = ShardUtils.extractShardId(reader); - if (shardId == null) { - throw new IllegalStateException(LoggerMessageFormat.format("couldn't extract shardId from reader [{}]", reader)); - } - - final IndicesAccessControl.IndexAccessControl permissions = indicesAccessControl.getIndexPermissions(shardId.getIndexName()); - // No permissions have been defined for an index, so don't intercept the index reader for access control - if (permissions == null) { - return reader; - } - - DirectoryReader wrappedReader = reader; - DocumentPermissions documentPermissions = permissions.getDocumentPermissions(); - if (documentPermissions != null && documentPermissions.hasDocumentLevelPermissions()) { - BooleanQuery filterQuery = documentPermissions.filter(getUser(), scriptService, shardId, queryShardContextProvider); - if (filterQuery != null) { - wrappedReader = DocumentSubsetReader.wrap(wrappedReader, bitsetFilterCache, new ConstantScoreQuery(filterQuery)); - } - } - - return permissions.getFieldPermissions().filter(wrappedReader); - } catch (IOException e) { - logger.error("Unable to apply field level security"); - throw ExceptionsHelper.convertToElastic(e); - } - } - - @Override - protected IndexSearcher wrap(IndexSearcher searcher) throws EngineException { - if (licenseState.isDocumentAndFieldLevelSecurityAllowed() == false) { - return searcher; - } - - final DirectoryReader directoryReader = (DirectoryReader) searcher.getIndexReader(); - if (directoryReader instanceof DocumentSubsetDirectoryReader) { - // The reasons why we return a custom searcher: - // 1) in the case the role query is sparse then large part of the main query can be skipped - // 2) If the role query doesn't match with any docs in a segment, that a segment can be skipped - IndexSearcher searcherWrapper = new IndexSearcherWrapper((DocumentSubsetDirectoryReader) directoryReader); - searcherWrapper.setQueryCache(searcher.getQueryCache()); - searcherWrapper.setQueryCachingPolicy(searcher.getQueryCachingPolicy()); - searcherWrapper.setSimilarity(searcher.getSimilarity()); - return searcherWrapper; - } - return searcher; - } - - static class IndexSearcherWrapper extends IndexSearcher { - - IndexSearcherWrapper(DocumentSubsetDirectoryReader r) { - super(r); - } - - @Override - protected void 
search(List leaves, Weight weight, Collector collector) throws IOException { - for (LeafReaderContext ctx : leaves) { // search each subreader - final LeafCollector leafCollector; - try { - leafCollector = collector.getLeafCollector(ctx); - } catch (CollectionTerminatedException e) { - // there is no doc of interest in this reader context - // continue with the following leaf - continue; - } - // The reader is always of type DocumentSubsetReader when we get here: - DocumentSubsetReader reader = (DocumentSubsetReader) ctx.reader(); - - BitSet roleQueryBits = reader.getRoleQueryBits(); - if (roleQueryBits == null) { - // nothing matches with the role query, so skip this segment: - continue; - } - - // if the role query result set is sparse then we should use the SparseFixedBitSet for advancing: - if (roleQueryBits instanceof SparseFixedBitSet) { - Scorer scorer = weight.scorer(ctx); - if (scorer != null) { - SparseFixedBitSet sparseFixedBitSet = (SparseFixedBitSet) roleQueryBits; - Bits realLiveDocs = reader.getWrappedLiveDocs(); - try { - intersectScorerAndRoleBits(scorer, sparseFixedBitSet, leafCollector, realLiveDocs); - } catch (CollectionTerminatedException e) { - // collection was terminated prematurely - // continue with the following leaf - } - } - } else { - BulkScorer bulkScorer = weight.bulkScorer(ctx); - if (bulkScorer != null) { - Bits liveDocs = reader.getLiveDocs(); - try { - bulkScorer.score(leafCollector, liveDocs); - } catch (CollectionTerminatedException e) { - // collection was terminated prematurely - // continue with the following leaf - } - } - } - } - } - } - - static void intersectScorerAndRoleBits(Scorer scorer, SparseFixedBitSet roleBits, LeafCollector collector, Bits acceptDocs) throws - IOException { - // ConjunctionDISI uses the DocIdSetIterator#cost() to order the iterators, so if roleBits has the lowest cardinality it should - // be used first: - DocIdSetIterator iterator = ConjunctionDISI.intersectIterators(Arrays.asList(new BitSetIterator(roleBits, - roleBits.approximateCardinality()), scorer.iterator())); - for (int docId = iterator.nextDoc(); docId < DocIdSetIterator.NO_MORE_DOCS; docId = iterator.nextDoc()) { - if (acceptDocs == null || acceptDocs.get(docId)) { - collector.collect(docId); - } - } - } - - protected IndicesAccessControl getIndicesAccessControl() { - IndicesAccessControl indicesAccessControl = threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); - if (indicesAccessControl == null) { - throw Exceptions.authorizationError("no indices permissions found"); - } - return indicesAccessControl; - } - - protected User getUser(){ - Authentication authentication = Authentication.getAuthentication(threadContext); - return authentication.getUser(); - } - -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java index 948503b3347..758cd16391c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java @@ -83,7 +83,7 @@ public class SourceOnlySnapshotShardTests extends IndexShardTestCase { .primaryTerm(0, primaryTerm) .putMapping("_doc", "{\"_source\":{\"enabled\": false}}").build(); - IndexShard shard = newShard(shardRouting, metaData, new InternalEngineFactory()); + IndexShard shard = newShard(shardRouting, metaData, null, new 
InternalEngineFactory()); recoverShardFromStore(shard); for (int i = 0; i < 1; i++) { @@ -278,7 +278,7 @@ public class SourceOnlySnapshotShardTests extends IndexShardTestCase { .settings(settings) .primaryTerm(0, primaryTerm); metaData.putMapping(mapping); - IndexShard targetShard = newShard(targetShardRouting, metaData.build(), new InternalEngineFactory()); + IndexShard targetShard = newShard(targetShardRouting, metaData.build(), null, new InternalEngineFactory()); boolean success = false; try { recoverShardFromStore(targetShard); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperIntegrationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java similarity index 93% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperIntegrationTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java index 33a7a0945b5..0b188ff7075 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperIntegrationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.test.AbstractBuilderTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.xpack.core.security.authc.Authentication; @@ -62,7 +63,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; -public class SecurityIndexSearcherWrapperIntegrationTests extends AbstractBuilderTestCase { +public class SecurityIndexReaderWrapperIntegrationTests extends AbstractBuilderTestCase { public void testDLS() throws Exception { ShardId shardId = new ShardId("_index", "_na_", 0); @@ -99,7 +100,7 @@ public class SecurityIndexSearcherWrapperIntegrationTests extends AbstractBuilde }); XPackLicenseState licenseState = mock(XPackLicenseState.class); when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true); - SecurityIndexSearcherWrapper wrapper = new SecurityIndexSearcherWrapper(s -> queryShardContext, + SecurityIndexReaderWrapper wrapper = new SecurityIndexReaderWrapper(s -> queryShardContext, bitsetFilterCache, threadContext, licenseState, scriptService) { @Override @@ -156,8 +157,9 @@ public class SecurityIndexSearcherWrapperIntegrationTests extends AbstractBuilde ParsedQuery parsedQuery = new ParsedQuery(new TermQuery(new Term("field", values[i]))); doReturn(new TermQueryBuilder("field", values[i])).when(queryShardContext).parseInnerQueryBuilder(any(XContentParser.class)); when(queryShardContext.toQuery(new TermsQueryBuilder("field", values[i]))).thenReturn(parsedQuery); - DirectoryReader wrappedDirectoryReader = wrapper.wrap(directoryReader); - IndexSearcher indexSearcher = wrapper.wrap(new IndexSearcher(wrappedDirectoryReader)); + DirectoryReader wrappedDirectoryReader = wrapper.apply(directoryReader); + IndexSearcher indexSearcher = 
new ContextIndexSearcher(wrappedDirectoryReader, + IndexSearcher.getDefaultSimilarity(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy()); int expectedHitCount = valuesHitCount[i]; logger.info("Going to verify hit count with query [{}] with expected total hits [{}]", parsedQuery.query(), expectedHitCount); @@ -222,7 +224,7 @@ public class SecurityIndexSearcherWrapperIntegrationTests extends AbstractBuilde XPackLicenseState licenseState = mock(XPackLicenseState.class); when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true); - SecurityIndexSearcherWrapper wrapper = new SecurityIndexSearcherWrapper(s -> queryShardContext, + SecurityIndexReaderWrapper wrapper = new SecurityIndexReaderWrapper(s -> queryShardContext, bitsetFilterCache, threadContext, licenseState, scriptService) { @Override @@ -259,8 +261,9 @@ public class SecurityIndexSearcherWrapperIntegrationTests extends AbstractBuilde iw.close(); DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(directory), shardId); - DirectoryReader wrappedDirectoryReader = wrapper.wrap(directoryReader); - IndexSearcher indexSearcher = wrapper.wrap(new IndexSearcher(wrappedDirectoryReader)); + DirectoryReader wrappedDirectoryReader = wrapper.apply(directoryReader); + IndexSearcher indexSearcher = new ContextIndexSearcher(wrappedDirectoryReader, + IndexSearcher.getDefaultSimilarity(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy()); ScoreDoc[] hits = indexSearcher.search(new MatchAllDocsQuery(), 1000).scoreDocs; Set actualDocIds = new HashSet<>(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperUnitTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperUnitTests.java new file mode 100644 index 00000000000..0535c8aa4e2 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperUnitTests.java @@ -0,0 +1,225 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
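The unit tests that follow pin down a default worth calling out: even when a role grants no fields at all, built-in meta fields stay readable. A small sketch of the assertion pattern (fieldPermissionDef is a private helper in the test class whose body is truncated here; the shape below is an assumption based on how it is called):

    import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions;
    import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition;

    class MetaFieldsSketch {
        // Assumed helper shape, mirroring fieldPermissionDef(granted, denied) in the tests.
        static FieldPermissionsDefinition def(String[] granted, String[] denied) {
            return new FieldPermissionsDefinition(granted, denied);
        }

        static void demo() {
            FieldPermissions permissions = new FieldPermissions(def(new String[]{}, null));
            // Meta fields remain visible even with an empty grant list...
            assert permissions.grantsAccessTo("_id");
            // ...while ordinary fields are filtered out.
            assert permissions.grantsAccessTo("some_random_regular_field") == false;
        }
    }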
+ */
+package org.elasticsearch.xpack.core.security.authz.accesscontrol;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MMapDirectory;
+import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.mapper.FieldNamesFieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.SeqNoFieldMapper;
+import org.elasticsearch.index.mapper.SourceFieldMapper;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.license.XPackLicenseState;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.security.authz.permission.DocumentPermissions;
+import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions;
+import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition;
+import org.junit.After;
+import org.junit.Before;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+import static java.util.Collections.singletonMap;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.sameInstance;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class SecurityIndexReaderWrapperUnitTests extends ESTestCase {
+
+    private static final Set<String> META_FIELDS;
+    static {
+        final Set<String> metaFields = new HashSet<>(Arrays.asList(MapperService.getAllMetaFields()));
+        metaFields.add(SourceFieldMapper.NAME);
+        metaFields.add(FieldNamesFieldMapper.NAME);
+        metaFields.add(SeqNoFieldMapper.NAME);
+        META_FIELDS = Collections.unmodifiableSet(metaFields);
+    }
+
+    private ThreadContext threadContext;
+    private ScriptService scriptService;
+    private SecurityIndexReaderWrapper securityIndexReaderWrapper;
+    private ElasticsearchDirectoryReader esIn;
+    private XPackLicenseState licenseState;
+
+    @Before
+    public void setup() throws Exception {
+        Index index = new Index("_index", "testUUID");
+        scriptService = mock(ScriptService.class);
+
+        ShardId shardId = new ShardId(index, 0);
+        licenseState = mock(XPackLicenseState.class);
+        when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true);
+        threadContext = new ThreadContext(Settings.EMPTY);
+        IndexShard indexShard = mock(IndexShard.class);
+        when(indexShard.shardId()).thenReturn(shardId);
+
+        Directory directory = new MMapDirectory(createTempDir());
+        IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig());
+        writer.close();
+
+        DirectoryReader in = DirectoryReader.open(directory); // unfortunately DirectoryReader isn't mock friendly
+        esIn = ElasticsearchDirectoryReader.wrap(in, shardId);
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        super.tearDown();
+        esIn.close();
+    }
+
+    public void testDefaultMetaFields() throws Exception {
+        securityIndexReaderWrapper =
+                new SecurityIndexReaderWrapper(null, null, threadContext, licenseState, scriptService) {
+            @Override
+            protected IndicesAccessControl getIndicesAccessControl() {
+                IndicesAccessControl.IndexAccessControl indexAccessControl = new IndicesAccessControl.IndexAccessControl(true,
+                    new FieldPermissions(fieldPermissionDef(new String[]{}, null)), DocumentPermissions.allowAll());
+                return new IndicesAccessControl(true, singletonMap("_index", indexAccessControl));
+            }
+        };
+
+        FieldSubsetReader.FieldSubsetDirectoryReader result =
+            (FieldSubsetReader.FieldSubsetDirectoryReader) securityIndexReaderWrapper.apply(esIn);
+        assertThat(result.getFilter().run("_uid"), is(true));
+        assertThat(result.getFilter().run("_id"), is(true));
+        assertThat(result.getFilter().run("_version"), is(true));
+        assertThat(result.getFilter().run("_type"), is(true));
+        assertThat(result.getFilter().run("_source"), is(true));
+        assertThat(result.getFilter().run("_routing"), is(true));
+        assertThat(result.getFilter().run("_timestamp"), is(true));
+        assertThat(result.getFilter().run("_ttl"), is(true));
+        assertThat(result.getFilter().run("_size"), is(true));
+        assertThat(result.getFilter().run("_index"), is(true));
+        assertThat(result.getFilter().run("_field_names"), is(true));
+        assertThat(result.getFilter().run("_seq_no"), is(true));
+        assertThat(result.getFilter().run("_some_random_meta_field"), is(true));
+        assertThat(result.getFilter().run("some_random_regular_field"), is(false));
+    }
+
+    public void testWrapReaderWhenFeatureDisabled() throws Exception {
+        when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(false);
+        securityIndexReaderWrapper =
+                new SecurityIndexReaderWrapper(null, null, threadContext, licenseState, scriptService);
+        DirectoryReader reader = securityIndexReaderWrapper.apply(esIn);
+        assertThat(reader, sameInstance(esIn));
+    }
+
+    public void testWildcards() throws Exception {
+        Set<String> expected = new HashSet<>(META_FIELDS);
+        expected.add("field1_a");
+        expected.add("field1_b");
+        expected.add("field1_c");
+        assertResolved(new FieldPermissions(fieldPermissionDef(new String[] {"field1*"}, null)), expected, "field", "field2");
+    }
+
+    public void testDotNotion() throws Exception {
+        Set<String> expected = new HashSet<>(META_FIELDS);
+        expected.add("foo.bar");
+        assertResolved(new FieldPermissions(fieldPermissionDef(new String[] {"foo.bar"}, null)), expected, "foo", "foo.baz", "bar.foo");
+
+        expected = new HashSet<>(META_FIELDS);
+        expected.add("foo.bar");
+        assertResolved(new FieldPermissions(fieldPermissionDef(new String[] {"foo.*"}, null)), expected, "foo", "bar");
+    }
+
+    private void assertResolved(FieldPermissions permissions, Set<String> expected, String... fieldsToTest) {
+        for (String field : expected) {
+            assertThat(field, permissions.grantsAccessTo(field), is(true));
+        }
+        for (String field : fieldsToTest) {
+            assertThat(field, permissions.grantsAccessTo(field), is(expected.contains(field)));
+        }
+    }
+
+    public void testFieldPermissionsWithFieldExceptions() throws Exception {
+        securityIndexReaderWrapper =
+                new SecurityIndexReaderWrapper(null, null, threadContext, licenseState, null);
+        String[] grantedFields = new String[]{};
+        String[] deniedFields;
+        Set<String> expected = new HashSet<>(META_FIELDS);
+        // Presence of fields in a role with an empty array implies access to no fields except the meta fields
+        assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, randomBoolean() ?
null : new String[]{})), + expected, "foo", "bar"); + + // make sure meta fields cannot be denied access to + deniedFields = META_FIELDS.toArray(new String[0]); + assertResolved(new FieldPermissions(fieldPermissionDef(null, deniedFields)), + new HashSet<>(Arrays.asList("foo", "bar", "_some_plugin_meta_field"))); + + // check we can add all fields with * + grantedFields = new String[]{"*"}; + expected = new HashSet<>(META_FIELDS); + expected.add("foo"); + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, randomBoolean() ? null : new String[]{})), expected); + + // same with null + grantedFields = null; + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, randomBoolean() ? null : new String[]{})), expected); + + // check we remove only excluded fields + grantedFields = new String[]{"*"}; + deniedFields = new String[]{"xfield"}; + expected = new HashSet<>(META_FIELDS); + expected.add("foo"); + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), expected, "xfield"); + + // same with null + grantedFields = null; + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), expected, "xfield"); + + // some other checks + grantedFields = new String[]{"field*"}; + deniedFields = new String[]{"field1", "field2"}; + expected = new HashSet<>(META_FIELDS); + expected.add("field3"); + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), expected, "field1", "field2"); + + grantedFields = new String[]{"field1", "field2"}; + deniedFields = new String[]{"field2"}; + expected = new HashSet<>(META_FIELDS); + expected.add("field1"); + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), expected, "field1", "field2"); + + grantedFields = new String[]{"field*"}; + deniedFields = new String[]{"field2"}; + expected = new HashSet<>(META_FIELDS); + expected.add("field1"); + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), expected, "field2"); + + deniedFields = new String[]{"field*"}; + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), + META_FIELDS, "field1", "field2"); + + // empty array for allowed fields always means no field is allowed + grantedFields = new String[]{}; + deniedFields = new String[]{}; + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), + META_FIELDS, "field1", "field2"); + + // make sure all field can be explicitly allowed + grantedFields = new String[]{"*"}; + deniedFields = randomBoolean() ? null : new String[]{}; + expected = new HashSet<>(META_FIELDS); + expected.add("field1"); + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), expected); + } + + private static FieldPermissionsDefinition fieldPermissionDef(String[] granted, String[] denied) { + return new FieldPermissionsDefinition(granted, denied); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java deleted file mode 100644 index 3da3949bad9..00000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java +++ /dev/null @@ -1,561 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.core.security.authz.accesscontrol; - -import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.Field.Store; -import org.apache.lucene.document.StringField; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.index.PostingsEnum; -import org.apache.lucene.index.Term; -import org.apache.lucene.index.TermsEnum; -import org.apache.lucene.misc.SweetSpotSimilarity; -import org.apache.lucene.search.BulkScorer; -import org.apache.lucene.search.Explanation; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.LeafCollector; -import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryCachingPolicy; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.Weight; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.MMapDirectory; -import org.apache.lucene.util.Accountable; -import org.apache.lucene.util.BitSet; -import org.apache.lucene.util.FixedBitSet; -import org.apache.lucene.util.SparseFixedBitSet; -import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.mapper.FieldNamesFieldMapper; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.SeqNoFieldMapper; -import org.elasticsearch.index.mapper.SourceFieldMapper; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.aggregations.LeafBucketCollector; -import org.elasticsearch.search.internal.ContextIndexSearcher; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetReader.DocumentSubsetDirectoryReader; -import org.elasticsearch.xpack.core.security.authz.permission.DocumentPermissions; -import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; -import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; -import org.junit.After; -import org.junit.Before; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.IdentityHashMap; -import java.util.Set; - -import static java.util.Collections.singletonMap; -import static 
org.elasticsearch.xpack.core.security.authz.accesscontrol.SecurityIndexSearcherWrapper.intersectScorerAndRoleBits; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.sameInstance; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class SecurityIndexSearcherWrapperUnitTests extends ESTestCase { - - private static final Set META_FIELDS; - static { - final Set metaFields = new HashSet<>(Arrays.asList(MapperService.getAllMetaFields())); - metaFields.add(SourceFieldMapper.NAME); - metaFields.add(FieldNamesFieldMapper.NAME); - metaFields.add(SeqNoFieldMapper.NAME); - META_FIELDS = Collections.unmodifiableSet(metaFields); - } - - private ThreadContext threadContext; - private ScriptService scriptService; - private SecurityIndexSearcherWrapper securityIndexSearcherWrapper; - private ElasticsearchDirectoryReader esIn; - private XPackLicenseState licenseState; - private IndexSettings indexSettings; - - @Before - public void setup() throws Exception { - Index index = new Index("_index", "testUUID"); - scriptService = mock(ScriptService.class); - indexSettings = IndexSettingsModule.newIndexSettings(index, Settings.EMPTY); - - ShardId shardId = new ShardId(index, 0); - licenseState = mock(XPackLicenseState.class); - when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true); - threadContext = new ThreadContext(Settings.EMPTY); - IndexShard indexShard = mock(IndexShard.class); - when(indexShard.shardId()).thenReturn(shardId); - - Directory directory = new MMapDirectory(createTempDir()); - IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig()); - writer.close(); - - DirectoryReader in = DirectoryReader.open(directory); // unfortunately DirectoryReader isn't mock friendly - esIn = ElasticsearchDirectoryReader.wrap(in, shardId); - } - - @After - public void tearDown() throws Exception { - super.tearDown(); - esIn.close(); - } - - public void testDefaultMetaFields() throws Exception { - securityIndexSearcherWrapper = - new SecurityIndexSearcherWrapper(null, null, threadContext, licenseState, scriptService) { - @Override - protected IndicesAccessControl getIndicesAccessControl() { - IndicesAccessControl.IndexAccessControl indexAccessControl = new IndicesAccessControl.IndexAccessControl(true, - new FieldPermissions(fieldPermissionDef(new String[]{}, null)), DocumentPermissions.allowAll()); - return new IndicesAccessControl(true, singletonMap("_index", indexAccessControl)); - } - }; - - FieldSubsetReader.FieldSubsetDirectoryReader result = - (FieldSubsetReader.FieldSubsetDirectoryReader) securityIndexSearcherWrapper.wrap(esIn); - assertThat(result.getFilter().run("_uid"), is(true)); - assertThat(result.getFilter().run("_id"), is(true)); - assertThat(result.getFilter().run("_version"), is(true)); - assertThat(result.getFilter().run("_type"), is(true)); - assertThat(result.getFilter().run("_source"), is(true)); - assertThat(result.getFilter().run("_routing"), is(true)); - assertThat(result.getFilter().run("_timestamp"), is(true)); - assertThat(result.getFilter().run("_ttl"), is(true)); - assertThat(result.getFilter().run("_size"), is(true)); - assertThat(result.getFilter().run("_index"), is(true)); - assertThat(result.getFilter().run("_field_names"), is(true)); - assertThat(result.getFilter().run("_seq_no"), is(true)); - 
assertThat(result.getFilter().run("_some_random_meta_field"), is(true)); - assertThat(result.getFilter().run("some_random_regular_field"), is(false)); - } - - public void testWrapReaderWhenFeatureDisabled() throws Exception { - when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(false); - securityIndexSearcherWrapper = - new SecurityIndexSearcherWrapper(null, null, threadContext, licenseState, scriptService); - DirectoryReader reader = securityIndexSearcherWrapper.wrap(esIn); - assertThat(reader, sameInstance(esIn)); - } - - public void testWrapSearcherWhenFeatureDisabled() throws Exception { - securityIndexSearcherWrapper = - new SecurityIndexSearcherWrapper(null, null, threadContext, licenseState, scriptService); - IndexSearcher indexSearcher = new IndexSearcher(esIn); - IndexSearcher result = securityIndexSearcherWrapper.wrap(indexSearcher); - assertThat(result, sameInstance(indexSearcher)); - } - - public void testWildcards() throws Exception { - Set expected = new HashSet<>(META_FIELDS); - expected.add("field1_a"); - expected.add("field1_b"); - expected.add("field1_c"); - assertResolved(new FieldPermissions(fieldPermissionDef(new String[] {"field1*"}, null)), expected, "field", "field2"); - } - - public void testDotNotion() throws Exception { - Set expected = new HashSet<>(META_FIELDS); - expected.add("foo.bar"); - assertResolved(new FieldPermissions(fieldPermissionDef(new String[] {"foo.bar"}, null)), expected, "foo", "foo.baz", "bar.foo"); - - expected = new HashSet<>(META_FIELDS); - expected.add("foo.bar"); - assertResolved(new FieldPermissions(fieldPermissionDef(new String[] {"foo.*"}, null)), expected, "foo", "bar"); - } - - public void testDelegateSimilarity() throws Exception { - IndexSettings settings = IndexSettingsModule.newIndexSettings("_index", Settings.EMPTY); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(settings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) { - } - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) { - - } - }); - DirectoryReader directoryReader = DocumentSubsetReader.wrap(esIn, bitsetFilterCache, new MatchAllDocsQuery()); - IndexSearcher indexSearcher = new IndexSearcher(directoryReader); - indexSearcher.setSimilarity(new SweetSpotSimilarity()); - indexSearcher.setQueryCachingPolicy(new QueryCachingPolicy() { - @Override - public void onUse(Query query) { - } - - @Override - public boolean shouldCache(Query query) { - return false; - } - }); - indexSearcher.setQueryCache((weight, policy) -> weight); - securityIndexSearcherWrapper = - new SecurityIndexSearcherWrapper(null, null, threadContext, licenseState, scriptService); - IndexSearcher result = securityIndexSearcherWrapper.wrap(indexSearcher); - assertThat(result, not(sameInstance(indexSearcher))); - assertThat(result.getSimilarity(), sameInstance(indexSearcher.getSimilarity())); - assertThat(result.getQueryCachingPolicy(), sameInstance(indexSearcher.getQueryCachingPolicy())); - assertThat(result.getQueryCache(), sameInstance(indexSearcher.getQueryCache())); - bitsetFilterCache.close(); - } - - public void testIntersectScorerAndRoleBits() throws Exception { - securityIndexSearcherWrapper = - new SecurityIndexSearcherWrapper(null, null, threadContext, licenseState, scriptService); - final Directory directory = newDirectory(); - IndexWriter iw = new IndexWriter( - directory, - new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) - ); - - 
Document document = new Document(); - document.add(new StringField("field1", "value1", Field.Store.NO)); - document.add(new StringField("field2", "value1", Field.Store.NO)); - iw.addDocument(document); - - document = new Document(); - document.add(new StringField("field1", "value2", Field.Store.NO)); - document.add(new StringField("field2", "value1", Field.Store.NO)); - iw.addDocument(document); - - document = new Document(); - document.add(new StringField("field1", "value3", Field.Store.NO)); - document.add(new StringField("field2", "value1", Field.Store.NO)); - iw.addDocument(document); - - document = new Document(); - document.add(new StringField("field1", "value4", Field.Store.NO)); - document.add(new StringField("field2", "value1", Field.Store.NO)); - iw.addDocument(document); - - iw.commit(); - iw.deleteDocuments(new Term("field1", "value3")); - iw.close(); - DirectoryReader directoryReader = DirectoryReader.open(directory); - IndexSearcher searcher = new IndexSearcher(directoryReader); - Weight weight = searcher.createWeight(new TermQuery(new Term("field2", "value1")), - org.apache.lucene.search.ScoreMode.COMPLETE_NO_SCORES, 1f); - - LeafReaderContext leaf = directoryReader.leaves().get(0); - - SparseFixedBitSet sparseFixedBitSet = query(leaf, "field1", "value1"); - LeafCollector leafCollector = new LeafBucketCollector() { - @Override - public void collect(int doc, long bucket) throws IOException { - assertThat(doc, equalTo(0)); - } - }; - intersectScorerAndRoleBits(weight.scorer(leaf), sparseFixedBitSet, leafCollector, leaf.reader().getLiveDocs()); - - sparseFixedBitSet = query(leaf, "field1", "value2"); - leafCollector = new LeafBucketCollector() { - @Override - public void collect(int doc, long bucket) throws IOException { - assertThat(doc, equalTo(1)); - } - }; - intersectScorerAndRoleBits(weight.scorer(leaf), sparseFixedBitSet, leafCollector, leaf.reader().getLiveDocs()); - - - sparseFixedBitSet = query(leaf, "field1", "value3"); - leafCollector = new LeafBucketCollector() { - @Override - public void collect(int doc, long bucket) throws IOException { - fail("docId [" + doc + "] should have been deleted"); - } - }; - intersectScorerAndRoleBits(weight.scorer(leaf), sparseFixedBitSet, leafCollector, leaf.reader().getLiveDocs()); - - sparseFixedBitSet = query(leaf, "field1", "value4"); - leafCollector = new LeafBucketCollector() { - @Override - public void collect(int doc, long bucket) throws IOException { - assertThat(doc, equalTo(3)); - } - }; - intersectScorerAndRoleBits(weight.scorer(leaf), sparseFixedBitSet, leafCollector, leaf.reader().getLiveDocs()); - - directoryReader.close(); - directory.close(); - } - - private void assertResolved(FieldPermissions permissions, Set expected, String... fieldsToTest) { - for (String field : expected) { - assertThat(field, permissions.grantsAccessTo(field), is(true)); - } - for (String field : fieldsToTest) { - assertThat(field, permissions.grantsAccessTo(field), is(expected.contains(field))); - } - } - - public void testFieldPermissionsWithFieldExceptions() throws Exception { - securityIndexSearcherWrapper = - new SecurityIndexSearcherWrapper(null, null, threadContext, licenseState, null); - String[] grantedFields = new String[]{}; - String[] deniedFields; - Set expected = new HashSet<>(META_FIELDS); - // Presence of fields in a role with an empty array implies access to no fields except the meta fields - assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, randomBoolean() ? 
null : new String[]{})), - expected, "foo", "bar"); - - // make sure meta fields cannot be denied access to - deniedFields = META_FIELDS.toArray(new String[0]); - assertResolved(new FieldPermissions(fieldPermissionDef(null, deniedFields)), - new HashSet<>(Arrays.asList("foo", "bar", "_some_plugin_meta_field"))); - - // check we can add all fields with * - grantedFields = new String[]{"*"}; - expected = new HashSet<>(META_FIELDS); - expected.add("foo"); - assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, randomBoolean() ? null : new String[]{})), expected); - - // same with null - grantedFields = null; - assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, randomBoolean() ? null : new String[]{})), expected); - - // check we remove only excluded fields - grantedFields = new String[]{"*"}; - deniedFields = new String[]{"xfield"}; - expected = new HashSet<>(META_FIELDS); - expected.add("foo"); - assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), expected, "xfield"); - - // same with null - grantedFields = null; - assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), expected, "xfield"); - - // some other checks - grantedFields = new String[]{"field*"}; - deniedFields = new String[]{"field1", "field2"}; - expected = new HashSet<>(META_FIELDS); - expected.add("field3"); - assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), expected, "field1", "field2"); - - grantedFields = new String[]{"field1", "field2"}; - deniedFields = new String[]{"field2"}; - expected = new HashSet<>(META_FIELDS); - expected.add("field1"); - assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), expected, "field1", "field2"); - - grantedFields = new String[]{"field*"}; - deniedFields = new String[]{"field2"}; - expected = new HashSet<>(META_FIELDS); - expected.add("field1"); - assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), expected, "field2"); - - deniedFields = new String[]{"field*"}; - assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), - META_FIELDS, "field1", "field2"); - - // empty array for allowed fields always means no field is allowed - grantedFields = new String[]{}; - deniedFields = new String[]{}; - assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), - META_FIELDS, "field1", "field2"); - - // make sure all field can be explicitly allowed - grantedFields = new String[]{"*"}; - deniedFields = randomBoolean() ? 
null : new String[]{}; - expected = new HashSet<>(META_FIELDS); - expected.add("field1"); - assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), expected); - } - - private SparseFixedBitSet query(LeafReaderContext leaf, String field, String value) throws IOException { - SparseFixedBitSet sparseFixedBitSet = new SparseFixedBitSet(leaf.reader().maxDoc()); - TermsEnum tenum = leaf.reader().terms(field).iterator(); - while (tenum.next().utf8ToString().equals(value) == false) { - } - PostingsEnum penum = tenum.postings(null); - sparseFixedBitSet.or(penum); - return sparseFixedBitSet; - } - - public void testIndexSearcherWrapperSparseNoDeletions() throws IOException { - doTestIndexSearcherWrapper(true, false); - } - - public void testIndexSearcherWrapperDenseNoDeletions() throws IOException { - doTestIndexSearcherWrapper(false, false); - } - - public void testIndexSearcherWrapperSparseWithDeletions() throws IOException { - doTestIndexSearcherWrapper(true, true); - } - - public void testIndexSearcherWrapperDenseWithDeletions() throws IOException { - doTestIndexSearcherWrapper(false, true); - } - - static class CreateScorerOnceWeight extends Weight { - - private final Weight weight; - private final Set seenLeaves = Collections.newSetFromMap(new IdentityHashMap<>()); - - protected CreateScorerOnceWeight(Weight weight) { - super(weight.getQuery()); - this.weight = weight; - } - - @Override - public void extractTerms(Set terms) { - weight.extractTerms(terms); - } - - @Override - public Explanation explain(LeafReaderContext context, int doc) throws IOException { - return weight.explain(context, doc); - } - - @Override - public Scorer scorer(LeafReaderContext context) throws IOException { - assertTrue(seenLeaves.add(context.reader().getCoreCacheHelper().getKey())); - return weight.scorer(context); - } - - @Override - public BulkScorer bulkScorer(LeafReaderContext context) - throws IOException { - assertTrue(seenLeaves.add(context.reader().getCoreCacheHelper().getKey())); - return weight.bulkScorer(context); - } - - @Override - public boolean isCacheable(LeafReaderContext ctx) { - return true; - } - } - - static class CreateScorerOnceQuery extends Query { - - private final Query query; - - CreateScorerOnceQuery(Query query) { - this.query = query; - } - - @Override - public String toString(String field) { - return query.toString(field); - } - - @Override - public Query rewrite(IndexReader reader) throws IOException { - Query queryRewritten = query.rewrite(reader); - if (query != queryRewritten) { - return new CreateScorerOnceQuery(queryRewritten); - } - return super.rewrite(reader); - } - - @Override - public Weight createWeight(IndexSearcher searcher, org.apache.lucene.search.ScoreMode scoreMode, float boost) throws IOException { - return new CreateScorerOnceWeight(query.createWeight(searcher, scoreMode, boost)); - } - - @Override - public boolean equals(Object obj) { - return sameClassAs(obj) && query.equals(((CreateScorerOnceQuery) obj).query); - } - - @Override - public int hashCode() { - return 31 * classHash() + query.hashCode(); - } - } - - public void doTestIndexSearcherWrapper(boolean sparse, boolean deletions) throws IOException { - Directory dir = newDirectory(); - IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(null)); - Document doc = new Document(); - StringField allowedField = new StringField("allowed", "yes", Store.NO); - doc.add(allowedField); - StringField fooField = new StringField("foo", "bar", Store.NO); - doc.add(fooField); - StringField 
deleteField = new StringField("delete", "no", Store.NO); - doc.add(deleteField); - w.addDocument(doc); - if (deletions) { - // add a document that matches foo:bar but will be deleted - deleteField.setStringValue("yes"); - w.addDocument(doc); - deleteField.setStringValue("no"); - } - allowedField.setStringValue("no"); - w.addDocument(doc); - if (sparse) { - for (int i = 0; i < 1000; ++i) { - w.addDocument(doc); - } - w.forceMerge(1); - } - w.deleteDocuments(new Term("delete", "yes")); - - IndexSettings settings = IndexSettingsModule.newIndexSettings("_index", Settings.EMPTY); - BitsetFilterCache.Listener listener = new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) { - - } - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) { - - } - }; - DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(w), new ShardId(indexSettings.getIndex(), 0)); - BitsetFilterCache cache = new BitsetFilterCache(settings, listener); - Query roleQuery = new TermQuery(new Term("allowed", "yes")); - BitSet bitSet = cache.getBitSetProducer(roleQuery).getBitSet(reader.leaves().get(0)); - if (sparse) { - assertThat(bitSet, instanceOf(SparseFixedBitSet.class)); - } else { - assertThat(bitSet, instanceOf(FixedBitSet.class)); - } - - DocumentSubsetDirectoryReader filteredReader = DocumentSubsetReader.wrap(reader, cache, roleQuery); - IndexSearcher wrapSearcher = new SecurityIndexSearcherWrapper.IndexSearcherWrapper(filteredReader); - Engine.Searcher engineSearcher = new Engine.Searcher("test", wrapSearcher, () -> {}); - ContextIndexSearcher searcher = new ContextIndexSearcher(engineSearcher, - wrapSearcher.getQueryCache(), wrapSearcher.getQueryCachingPolicy()); - searcher.setCheckCancelled(() -> {}); - - // Searching a non-existing term will trigger a null scorer - assertEquals(0, searcher.count(new TermQuery(new Term("non_existing_field", "non_existing_value")))); - - assertEquals(1, searcher.count(new TermQuery(new Term("foo", "bar")))); - - // make sure scorers are created only once, see #1725 - assertEquals(1, searcher.count(new CreateScorerOnceQuery(new MatchAllDocsQuery()))); - IOUtils.close(reader, w, dir); - } - - private static FieldPermissionsDefinition fieldPermissionDef(String[] granted, String[] denied) { - return new FieldPermissionsDefinition(granted, denied); - } -} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index abb02d617ab..986a39ab255 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -119,7 +119,7 @@ import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken import org.elasticsearch.xpack.core.security.authz.AuthorizationEngine; import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; -import org.elasticsearch.xpack.core.security.authz.accesscontrol.SecurityIndexSearcherWrapper; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.SecurityIndexReaderWrapper; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; import 
org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore;
@@ -692,8 +692,8 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw
         if (enabled) {
             assert getLicenseState() != null;
             if (XPackSettings.DLS_FLS_ENABLED.get(settings)) {
-                module.setSearcherWrapper(indexService ->
-                        new SecurityIndexSearcherWrapper(
+                module.setReaderWrapper(indexService ->
+                        new SecurityIndexReaderWrapper(
                                 shardId -> indexService.newQueryShardContext(shardId.id(),
                                 // we pass a null index reader, which is legal and will disable rewrite optimizations
                                 // based on index statistics, which is probably safer...

From f02cbe9e402d0f96c001ed80b6a10bdcc863e3f7 Mon Sep 17 00:00:00 2001
From: Tanguy Leroux
Date: Fri, 28 Jun 2019 16:18:54 +0200
Subject: [PATCH 32/42] Trim translog for closed indices (#43156)

Today when an index is closed, all of its shards are force-flushed but the
translog files are left around. As explained in #42445 we'd like to trim
the translog for closed indices so that it consumes less disk space. This
commit reuses the existing AsyncTrimTranslogTask and re-enables it for
closed indices. At the time the task is executed, we should have the
guarantee that nothing holds the translog files that are going to be
removed. It also leaves a short period of time (10 min) during which the
translog files of a recently closed index are still present on disk. This
could also help in some cases where the closed index is reopened shortly
after being closed (for example, in order to update an index setting).

Relates to #42445
---
 .../org/elasticsearch/index/IndexService.java |  9 ++-
 .../index/engine/NoOpEngine.java              | 58 ++++++++++++++++++-
 .../index/IndexServiceTests.java              | 48 ++++++++++++++-
 .../index/engine/NoOpEngineTests.java         | 43 +++++++++++++-
 4 files changed, 153 insertions(+), 5 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java
index d09dcd02b89..2e27423cbcd 100644
--- a/server/src/main/java/org/elasticsearch/index/IndexService.java
+++ b/server/src/main/java/org/elasticsearch/index/IndexService.java
@@ -944,6 +944,11 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
                 .getSettings().getAsTime(INDEX_TRANSLOG_RETENTION_CHECK_INTERVAL_SETTING, TimeValue.timeValueMinutes(10)));
         }

+        @Override
+        protected boolean mustReschedule() {
+            return indexService.closed.get() == false;
+        }
+
         @Override
         protected void runInternal() {
             indexService.maybeTrimTranslog();
@@ -1035,8 +1040,8 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
         return fsyncTask;
     }

-    AsyncGlobalCheckpointTask getGlobalCheckpointTask() {
-        return globalCheckpointTask;
+    AsyncTrimTranslogTask getTrimTranslogTask() { // for tests
+        return trimTranslogTask;
     }

     /**
diff --git a/server/src/main/java/org/elasticsearch/index/engine/NoOpEngine.java b/server/src/main/java/org/elasticsearch/index/engine/NoOpEngine.java
index 7f474d1be24..007a13351df 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/NoOpEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/NoOpEngine.java
@@ -27,16 +27,24 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.SegmentReader;
 import org.apache.lucene.store.Directory;
 import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.util.concurrent.ReleasableLock;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.translog.Translog;
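// A minimal sketch (not part of the patch itself) of the trimming idea this
// commit implements below: with both the last-commit and recovery generations
// pinned to the safe commit's generation, a TranslogDeletionPolicy with no
// size-based and no age-based retention releases every older translog file.
// Names such as translogConfig, translogUuid, lastCommitGeneration and the
// two suppliers are assumed to be in scope, resolved from the last commit's
// user data as the patched trimUnreferencedTranslogFiles() does.
TranslogDeletionPolicy policy = new TranslogDeletionPolicy(-1, -1); // retain nothing by size or age
policy.setTranslogGenerationOfLastCommit(lastCommitGeneration);
policy.setMinTranslogGenerationForRecovery(lastCommitGeneration);
try (Translog translog = new Translog(translogConfig, translogUuid, policy,
        globalCheckpointSupplier, primaryTermSupplier, seqNo -> {})) {
    translog.trimUnreferencedReaders(); // deletes the now-unreferenced generations
}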
+import org.elasticsearch.index.translog.TranslogConfig;
+import org.elasticsearch.index.translog.TranslogDeletionPolicy;

 import java.io.IOException;
 import java.io.UncheckedIOException;
 import java.util.List;
+import java.util.Map;
 import java.util.function.Function;

 /**
  * NoOpEngine is an engine implementation that does nothing but the bare minimum
  * required in order to have an engine. All attempts to do something (search,
- * index, get), throw {@link UnsupportedOperationException}.
+ * index, get), throw {@link UnsupportedOperationException}. However, NoOpEngine
+ * allows trimming any existing translog files through the
+ * {@link #trimUnreferencedTranslogFiles()} method.
  */
 public final class NoOpEngine extends ReadOnlyEngine {
@@ -116,4 +124,52 @@ public final class NoOpEngine extends ReadOnlyEngine {
             return super.segmentsStats(includeSegmentFileSizes, includeUnloadedSegments);
         }
     }
+
+    /**
+     * This implementation will trim existing translog files using a {@link TranslogDeletionPolicy}
+     * that retains nothing but the last translog generation from safe commit.
+     */
+    @Override
+    public void trimUnreferencedTranslogFiles() {
+        final Store store = this.engineConfig.getStore();
+        store.incRef();
+        try (ReleasableLock lock = readLock.acquire()) {
+            ensureOpen();
+            final List<IndexCommit> commits = DirectoryReader.listCommits(store.directory());
+            if (commits.size() == 1) {
+                final Map<String, String> commitUserData = getLastCommittedSegmentInfos().getUserData();
+                final String translogUuid = commitUserData.get(Translog.TRANSLOG_UUID_KEY);
+                if (translogUuid == null) {
+                    throw new IllegalStateException("commit doesn't contain translog unique id");
+                }
+                if (commitUserData.containsKey(Translog.TRANSLOG_GENERATION_KEY) == false) {
+                    throw new IllegalStateException("commit doesn't contain translog generation id");
+                }
+                final long lastCommitGeneration = Long.parseLong(commitUserData.get(Translog.TRANSLOG_GENERATION_KEY));
+                final TranslogConfig translogConfig = engineConfig.getTranslogConfig();
+                final long minTranslogGeneration = Translog.readMinTranslogGeneration(translogConfig.getTranslogPath(), translogUuid);
+
+                if (minTranslogGeneration < lastCommitGeneration) {
+                    // a translog deletion policy that retains nothing but the last translog generation from safe commit
+                    final TranslogDeletionPolicy translogDeletionPolicy = new TranslogDeletionPolicy(-1, -1);
+                    translogDeletionPolicy.setTranslogGenerationOfLastCommit(lastCommitGeneration);
+                    translogDeletionPolicy.setMinTranslogGenerationForRecovery(lastCommitGeneration);
+
+                    try (Translog translog = new Translog(translogConfig, translogUuid, translogDeletionPolicy,
+                            engineConfig.getGlobalCheckpointSupplier(), engineConfig.getPrimaryTermSupplier(), seqNo -> {})) {
+                        translog.trimUnreferencedReaders();
+                    }
+                }
+            }
+        } catch (final Exception e) {
+            try {
+                failEngine("translog trimming failed", e);
+            } catch (Exception inner) {
+                e.addSuppressed(inner);
+            }
+            throw new EngineException(shardId, "failed to trim translog", e);
+        } finally {
+            store.decRef();
+        }
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java
index 2d4030a51ce..5f1081f731b 100644
--- a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java
@@ -21,6 +21,7 @@ package org.elasticsearch.index;

 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.TopDocs;
+import 
org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; @@ -42,12 +43,15 @@ import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.nio.file.Path; import java.util.Collection; import java.util.Collections; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import static org.elasticsearch.index.shard.IndexShardTestCase.getEngine; import static org.elasticsearch.test.InternalSettingsPlugin.TRANSLOG_RETENTION_CHECK_INTERVAL_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.core.IsEqual.equalTo; @@ -370,7 +374,7 @@ public class IndexServiceTests extends ESSingleNodeTestCase { .build(); IndexService indexService = createIndex("test", settings); ensureGreen("test"); - assertTrue(indexService.getRefreshTask().mustReschedule()); + assertTrue(indexService.getTrimTranslogTask().mustReschedule()); client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); client().admin().indices().prepareFlush("test").get(); client().admin().indices().prepareUpdateSettings("test") @@ -382,6 +386,48 @@ public class IndexServiceTests extends ESSingleNodeTestCase { assertBusy(() -> assertThat(IndexShardTestCase.getTranslog(shard).totalOperations(), equalTo(0))); } + public void testAsyncTranslogTrimTaskOnClosedIndex() throws Exception { + final String indexName = "test"; + IndexService indexService = createIndex(indexName, Settings.builder() + .put(TRANSLOG_RETENTION_CHECK_INTERVAL_SETTING.getKey(), "100ms") + .build()); + + Translog translog = IndexShardTestCase.getTranslog(indexService.getShard(0)); + final Path translogPath = translog.getConfig().getTranslogPath(); + final String translogUuid = translog.getTranslogUUID(); + + final int numDocs = scaledRandomIntBetween(10, 100); + for (int i = 0; i < numDocs; i++) { + client().prepareIndex().setIndex(indexName).setId(String.valueOf(i)).setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + if (randomBoolean()) { + client().admin().indices().prepareFlush(indexName).get(); + } + } + assertThat(translog.totalOperations(), equalTo(numDocs)); + assertThat(translog.stats().estimatedNumberOfOperations(), equalTo(numDocs)); + assertAcked(client().admin().indices().prepareClose("test").setWaitForActiveShards(ActiveShardCount.DEFAULT)); + + indexService = getInstanceFromNode(IndicesService.class).indexServiceSafe(indexService.index()); + assertTrue(indexService.getTrimTranslogTask().mustReschedule()); + + final long lastCommitedTranslogGeneration; + try (Engine.IndexCommitRef indexCommitRef = getEngine(indexService.getShard(0)).acquireLastIndexCommit(false)) { + Map lastCommittedUserData = indexCommitRef.getIndexCommit().getUserData(); + lastCommitedTranslogGeneration = Long.parseLong(lastCommittedUserData.get(Translog.TRANSLOG_GENERATION_KEY)); + } + assertBusy(() -> { + long minTranslogGen = Translog.readMinTranslogGeneration(translogPath, translogUuid); + assertThat(minTranslogGen, equalTo(lastCommitedTranslogGeneration)); + }); + + assertAcked(client().admin().indices().prepareOpen("test").setWaitForActiveShards(ActiveShardCount.DEFAULT)); + + indexService = 
getInstanceFromNode(IndicesService.class).indexServiceSafe(indexService.index()); + translog = IndexShardTestCase.getTranslog(indexService.getShard(0)); + assertThat(translog.totalOperations(), equalTo(0)); + assertThat(translog.stats().estimatedNumberOfOperations(), equalTo(0)); + } + public void testIllegalFsyncInterval() { Settings settings = Settings.builder() .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "0ms") // disable diff --git a/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java index 6f74ac23a8e..f45eab0e057 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogDeletionPolicy; import org.elasticsearch.test.IndexSettingsModule; @@ -42,6 +43,7 @@ import java.io.IOException; import java.io.UncheckedIOException; import java.nio.file.Path; import java.util.Collections; +import java.util.Map; import java.util.concurrent.atomic.AtomicLong; import static org.hamcrest.Matchers.equalTo; @@ -83,7 +85,7 @@ public class NoOpEngineTests extends EngineTestCase { tracker.updateLocalCheckpoint(allocationId.getId(), i); } - flushAndTrimTranslog(engine); + engine.flush(true, true); long localCheckpoint = engine.getPersistedLocalCheckpoint(); long maxSeqNo = engine.getSeqNoStats(100L).getMaxSeqNo(); @@ -159,6 +161,45 @@ public class NoOpEngineTests extends EngineTestCase { } } + public void testTrimUnreferencedTranslogFiles() throws Exception { + final ReplicationTracker tracker = (ReplicationTracker) engine.config().getGlobalCheckpointSupplier(); + ShardRouting routing = TestShardRouting.newShardRouting("test", shardId.id(), "node", + null, true, ShardRoutingState.STARTED, allocationId); + IndexShardRoutingTable table = new IndexShardRoutingTable.Builder(shardId).addShard(routing).build(); + tracker.updateFromMaster(1L, Collections.singleton(allocationId.getId()), table); + tracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); + + final int numDocs = scaledRandomIntBetween(10, 3000); + for (int i = 0; i < numDocs; i++) { + engine.index(indexForDoc(createParsedDoc(Integer.toString(i), null))); + if (rarely()) { + engine.flush(); + } + tracker.updateLocalCheckpoint(allocationId.getId(), i); + } + engine.flush(true, true); + + final String translogUuid = engine.getTranslog().getTranslogUUID(); + final long minFileGeneration = engine.getTranslog().getMinFileGeneration(); + final long currentFileGeneration = engine.getTranslog().currentFileGeneration(); + engine.close(); + + final NoOpEngine noOpEngine = new NoOpEngine(noOpConfig(INDEX_SETTINGS, store, primaryTranslogDir, tracker)); + final Path translogPath = noOpEngine.config().getTranslogConfig().getTranslogPath(); + + final long lastCommitedTranslogGeneration; + try (Engine.IndexCommitRef indexCommitRef = noOpEngine.acquireLastIndexCommit(false)) { + Map lastCommittedUserData = indexCommitRef.getIndexCommit().getUserData(); + lastCommitedTranslogGeneration = Long.parseLong(lastCommittedUserData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertThat(lastCommitedTranslogGeneration, 
equalTo(currentFileGeneration)); + } + + assertThat(Translog.readMinTranslogGeneration(translogPath, translogUuid), equalTo(minFileGeneration)); + noOpEngine.trimUnreferencedTranslogFiles(); + assertThat(Translog.readMinTranslogGeneration(translogPath, translogUuid), equalTo(lastCommitedTranslogGeneration)); + noOpEngine.close(); + } + private void flushAndTrimTranslog(final InternalEngine engine) { engine.flush(true, true); final TranslogDeletionPolicy deletionPolicy = engine.getTranslog().getDeletionPolicy(); From 67a3c656c35c4861b662bc52ac845a8c365cd978 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 28 Jun 2019 10:02:37 -0500 Subject: [PATCH 33/42] [7.x] [ML][Data Frame] removing format support (#43659) (#43747) * [ML][Data Frame] removing format support (#43659) * Fixing conflicts --- .../pivot/DateHistogramGroupSource.java | 35 +++---------------- .../pivot/DateHistogramGroupSourceTests.java | 1 - .../hlrc/DateHistogramGroupSourceTests.java | 4 --- .../pivot/DateHistogramGroupSource.java | 30 ++++++---------- .../pivot/DateHistogramGroupSourceTests.java | 19 ++++++++-- .../integration/DataFrameIntegTestCase.java | 12 +++---- .../integration/DataFrameTransformIT.java | 4 +-- .../integration/DataFramePivotRestIT.java | 8 ++--- .../dataframe/persistence/DataframeIndex.java | 19 ++-------- 9 files changed, 45 insertions(+), 87 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSource.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSource.java index d880bfd8214..c8fb885896d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSource.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSource.java @@ -45,7 +45,6 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optiona public class DateHistogramGroupSource extends SingleGroupSource implements ToXContentObject { private static final ParseField TIME_ZONE = new ParseField("time_zone"); - private static final ParseField FORMAT = new ParseField("format"); // From DateHistogramAggregationBuilder in core, transplanted and modified to a set // so we don't need to import a dependency on the class @@ -195,8 +194,7 @@ public class DateHistogramGroupSource extends SingleGroupSource implements ToXCo } ZoneId zoneId = (ZoneId) args[3]; - String format = (String) args[4]; - return new DateHistogramGroupSource(field, interval, format, zoneId); + return new DateHistogramGroupSource(field, interval, zoneId); }); static { @@ -212,8 +210,6 @@ public class DateHistogramGroupSource extends SingleGroupSource implements ToXCo return ZoneOffset.ofHours(p.intValue()); } }, TIME_ZONE, ObjectParser.ValueType.LONG); - - PARSER.declareString(optionalConstructorArg(), FORMAT); } public static DateHistogramGroupSource fromXContent(final XContentParser parser) { @@ -221,13 +217,11 @@ public class DateHistogramGroupSource extends SingleGroupSource implements ToXCo } private final Interval interval; - private final String format; private final ZoneId timeZone; - DateHistogramGroupSource(String field, Interval interval, String format, ZoneId timeZone) { + DateHistogramGroupSource(String field, Interval interval, ZoneId timeZone) { super(field); this.interval = interval; - this.format = format; this.timeZone = timeZone; } @@ -240,10 +234,6 @@ public class 
DateHistogramGroupSource extends SingleGroupSource implements ToXCo return interval; } - public String getFormat() { - return format; - } - public ZoneId getTimeZone() { return timeZone; } @@ -258,9 +248,6 @@ public class DateHistogramGroupSource extends SingleGroupSource implements ToXCo if (timeZone != null) { builder.field(TIME_ZONE.getPreferredName(), timeZone.toString()); } - if (format != null) { - builder.field(FORMAT.getPreferredName(), format); - } builder.endObject(); return builder; } @@ -279,13 +266,12 @@ public class DateHistogramGroupSource extends SingleGroupSource implements ToXCo return Objects.equals(this.field, that.field) && Objects.equals(this.interval, that.interval) && - Objects.equals(this.timeZone, that.timeZone) && - Objects.equals(this.format, that.format); + Objects.equals(this.timeZone, that.timeZone); } @Override public int hashCode() { - return Objects.hash(field, interval, timeZone, format); + return Objects.hash(field, interval, timeZone); } public static Builder builder() { @@ -296,7 +282,6 @@ public class DateHistogramGroupSource extends SingleGroupSource implements ToXCo private String field; private Interval interval; - private String format; private ZoneId timeZone; /** @@ -319,16 +304,6 @@ public class DateHistogramGroupSource extends SingleGroupSource implements ToXCo return this; } - /** - * Set the optional String formatting for the time interval. - * @param format The format of the output for the time interval key - * @return The {@link Builder} with the format set. - */ - public Builder setFormat(String format) { - this.format = format; - return this; - } - /** * Sets the time zone to use for this aggregation * @param timeZone The zoneId for the timeZone @@ -340,7 +315,7 @@ public class DateHistogramGroupSource extends SingleGroupSource implements ToXCo } public DateHistogramGroupSource build() { - return new DateHistogramGroupSource(field, interval, format, timeZone); + return new DateHistogramGroupSource(field, interval, timeZone); } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java index 32605f5c286..ece1c4fb743 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java @@ -39,7 +39,6 @@ public class DateHistogramGroupSourceTests extends AbstractXContentTestCase STRICT_PARSER = createParser(false); private static final ConstructingObjectParser LENIENT_PARSER = createParser(true); private final Interval interval; - private String format; private ZoneId timeZone; public DateHistogramGroupSource(String field, Interval interval) { @@ -205,7 +204,10 @@ public class DateHistogramGroupSource extends SingleGroupSource { super(in); this.interval = readInterval(in); this.timeZone = in.readOptionalZoneId(); - this.format = in.readOptionalString(); + // Format was optional in 7.2.x, removed in 7.3+ + if (in.getVersion().before(Version.V_7_3_0)) { + in.readOptionalString(); + } } private static ConstructingObjectParser createParser(boolean lenient) { @@ -242,7 +244,6 @@ public class DateHistogramGroupSource extends SingleGroupSource { } }, TIME_ZONE, ObjectParser.ValueType.LONG); - parser.declareString(DateHistogramGroupSource::setFormat, 
FORMAT); return parser; } @@ -259,14 +260,6 @@ public class DateHistogramGroupSource extends SingleGroupSource { return interval; } - public String getFormat() { - return format; - } - - public void setFormat(String format) { - this.format = format; - } - public ZoneId getTimeZone() { return timeZone; } @@ -280,7 +273,10 @@ public class DateHistogramGroupSource extends SingleGroupSource { out.writeOptionalString(field); writeInterval(interval, out); out.writeOptionalZoneId(timeZone); - out.writeOptionalString(format); + // Format was optional in 7.2.x, removed in 7.3+ + if (out.getVersion().before(Version.V_7_3_0)) { + out.writeOptionalString(null); + } } @Override @@ -293,9 +289,6 @@ public class DateHistogramGroupSource extends SingleGroupSource { if (timeZone != null) { builder.field(TIME_ZONE.getPreferredName(), timeZone.toString()); } - if (format != null) { - builder.field(FORMAT.getPreferredName(), format); - } builder.endObject(); return builder; } @@ -314,13 +307,12 @@ public class DateHistogramGroupSource extends SingleGroupSource { return Objects.equals(this.field, that.field) && Objects.equals(interval, that.interval) && - Objects.equals(timeZone, that.timeZone) && - Objects.equals(format, that.format); + Objects.equals(timeZone, that.timeZone); } @Override public int hashCode() { - return Objects.hash(field, interval, timeZone, format); + return Objects.hash(field, interval, timeZone); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java index 7ce03743313..b28cf603030 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java @@ -6,6 +6,9 @@ package org.elasticsearch.xpack.core.dataframe.transforms.pivot; +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -29,12 +32,22 @@ public class DateHistogramGroupSourceTests extends AbstractSerializingTestCase groups = new HashMap<>(); - groups.put("by-day", createDateHistogramGroupSourceWithCalendarInterval("timestamp", DateHistogramInterval.DAY, null, null)); + groups.put("by-day", createDateHistogramGroupSourceWithCalendarInterval("timestamp", DateHistogramInterval.DAY, null)); groups.put("by-user", TermsGroupSource.builder().setField("user_id").build()); groups.put("by-business", TermsGroupSource.builder().setField("business_id").build()); @@ -82,7 +82,7 @@ public class DataFrameTransformIT extends DataFrameIntegTestCase { createReviewsIndex(indexName, 100); Map groups = new HashMap<>(); - groups.put("by-day", createDateHistogramGroupSourceWithCalendarInterval("timestamp", DateHistogramInterval.DAY, null, null)); + groups.put("by-day", createDateHistogramGroupSourceWithCalendarInterval("timestamp", DateHistogramInterval.DAY, null)); groups.put("by-user", TermsGroupSource.builder().setField("user_id").build()); groups.put("by-business", TermsGroupSource.builder().setField("business_id").build()); diff --git 
a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index 85457307fc8..f2cd95ed1a9 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -373,7 +373,7 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { + " \"group_by\": {" + " \"by_hr\": {" + " \"date_histogram\": {" - + " \"fixed_interval\": \"1h\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-dd_HH\"" + + " \"fixed_interval\": \"1h\",\"field\":\"timestamp\"" + " } } }," + " \"aggregations\": {" + " \"avg_rating\": {" @@ -407,7 +407,7 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { config += " \"pivot\": {" + " \"group_by\": {" + " \"user.id\": {\"terms\": { \"field\": \"user_id\" }}," - + " \"by_day\": {\"date_histogram\": {\"fixed_interval\": \"1d\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-dd\"}}}," + + " \"by_day\": {\"date_histogram\": {\"fixed_interval\": \"1d\",\"field\":\"timestamp\"}}}," + " \"aggregations\": {" + " \"user.avg_rating\": {" + " \"avg\": {" @@ -457,7 +457,7 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { + " \"pivot\": {" + " \"group_by\": {" + " \"user.id\": {\"terms\": { \"field\": \"user_id\" }}," - + " \"by_day\": {\"date_histogram\": {\"fixed_interval\": \"1d\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-dd\"}}}," + + " \"by_day\": {\"date_histogram\": {\"fixed_interval\": \"1d\",\"field\":\"timestamp\"}}}," + " \"aggregations\": {" + " \"user.avg_rating\": {" + " \"avg\": {" @@ -497,7 +497,7 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { config +=" \"pivot\": { \n" + " \"group_by\": {\n" + " \"by_day\": {\"date_histogram\": {\n" + - " \"fixed_interval\": \"1d\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-dd\"\n" + + " \"fixed_interval\": \"1d\",\"field\":\"timestamp\"\n" + " }}\n" + " },\n" + " \n" + diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java index 75911aa8330..b3a6a80a0b1 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java @@ -19,8 +19,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.DateHistogramGroupSource; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.SingleGroupSource; import java.io.IOException; import java.time.Clock; @@ -35,9 +33,7 @@ public final class DataframeIndex { public static final String DOC_TYPE = "_doc"; private static final String PROPERTIES = "properties"; private static final String TYPE = "type"; - private static final String FORMAT = "format"; private static final String META = "_meta"; - private static final 
String DEFAULT_TIME_FORMAT = "strict_date_optional_time||epoch_millis";

     private DataframeIndex() {
     }
@@ -56,7 +52,7 @@ public final class DataframeIndex {

         request.mapping(
                 DOC_TYPE,
-                createMappingXContent(mappings, transformConfig.getPivotConfig().getGroupConfig().getGroups(), transformConfig.getId(), clock));
+                createMappingXContent(mappings, transformConfig.getId(), clock));

         client.execute(CreateIndexAction.INSTANCE, request, ActionListener.wrap(createIndexResponse -> {
             listener.onResponse(true);
@@ -69,13 +65,12 @@ public final class DataframeIndex {
     }

     private static XContentBuilder createMappingXContent(Map<String, String> mappings,
-                                                         Map<String, SingleGroupSource> groupSources,
                                                          String id,
                                                          Clock clock) {
         try {
             XContentBuilder builder = jsonBuilder().startObject();
             builder.startObject(DOC_TYPE);
-            addProperties(builder, mappings, groupSources);
+            addProperties(builder, mappings);
             addMetaData(builder, id, clock);
             builder.endObject(); // DOC_TYPE
             return builder.endObject();
@@ -85,8 +80,7 @@ public final class DataframeIndex {
     }

     private static XContentBuilder addProperties(XContentBuilder builder,
-                                                 Map<String, String> mappings,
-                                                 Map<String, SingleGroupSource> groupSources) throws IOException {
+                                                 Map<String, String> mappings) throws IOException {
         builder.startObject(PROPERTIES);
         for (Entry<String, String> field : mappings.entrySet()) {
             String fieldName = field.getKey();
@@ -95,13 +89,6 @@ public final class DataframeIndex {
             builder.startObject(fieldName);
             builder.field(TYPE, fieldType);
-            SingleGroupSource groupSource = groupSources.get(fieldName);
-            if (groupSource instanceof DateHistogramGroupSource) {
-                String format = ((DateHistogramGroupSource) groupSource).getFormat();
-                if (format != null) {
-                    builder.field(FORMAT, DEFAULT_TIME_FORMAT + "||" + format);
-                }
-            }
             builder.endObject();
         }
         builder.endObject(); // PROPERTIES

From d8fe0f5c13b97b1b24dcd6bedf6a04a44e380a33 Mon Sep 17 00:00:00 2001
From: James Rodewig
Date: Fri, 28 Jun 2019 12:56:22 -0400
Subject: [PATCH 34/42] [DOCS] Rewrite `terms_set` query (#43060)

---
 .../query-dsl/terms-set-query.asciidoc        | 270 ++++++++++++------
 1 file changed, 190 insertions(+), 80 deletions(-)

diff --git a/docs/reference/query-dsl/terms-set-query.asciidoc b/docs/reference/query-dsl/terms-set-query.asciidoc
index 3ebfb672e20..0f097e494bf 100644
--- a/docs/reference/query-dsl/terms-set-query.asciidoc
+++ b/docs/reference/query-dsl/terms-set-query.asciidoc
@@ -1,121 +1,231 @@
 [[query-dsl-terms-set-query]]
 === Terms Set Query

-Returns any documents that match with at least one or more of the
-provided terms. The terms are not analyzed and thus must match exactly.
-The number of terms that must match varies per document and is either
-controlled by a minimum should match field or computed per document in
-a minimum should match script.
+Returns documents that contain a minimum number of *exact* terms in a provided
+field.

-The field that controls the number of required terms that must match must
-be a number field:
+The `terms_set` query is the same as the <<query-dsl-terms-query,`terms` query>>,
+except you can define the number of matching terms required to
+return a document. For example:
+
+* A field, `programming_languages`, contains a list of known programming
+languages, such as `c++`, `java`, or `php` for job candidates. You can use the
+`terms_set` query to return documents that match at least two of these
+languages.
+
+* A field, `permissions`, contains a list of possible user permissions for an
+application. You can use the `terms_set` query to return documents that
+match a subset of these permissions.
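Before the index setup and example request that follow, the matching rule is worth stating plainly: a document is a hit when the number of query terms that appear verbatim in the field reaches that document's own threshold. The following self-contained Java sketch (not Elasticsearch source; the class name and sample values are invented for illustration) mirrors the per-document counting the query performs:

[source,java]
----
import java.util.List;
import java.util.Set;

public class TermsSetSketch {

    // Count how many query terms appear verbatim among the field's values,
    // then compare the count against the per-document threshold.
    static boolean matches(Set<String> fieldValues, List<String> queryTerms, long requiredMatches) {
        long matching = queryTerms.stream().filter(fieldValues::contains).count();
        return matching >= requiredMatches;
    }

    public static void main(String[] args) {
        // A candidate who knows java and php, with a threshold of 2,
        // matches a query for ["c++", "java", "php"].
        System.out.println(matches(Set.of("java", "php"), List.of("c++", "java", "php"), 2)); // true
    }
}
----

Note that the comparisons are exact, so `Java` would not match `java`.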
+
+[[terms-set-query-ex-request]]
+==== Example request
+
+[[terms-set-query-ex-request-index-setup]]
+===== Index setup
+In most cases, you'll need to include a <<number,numeric>> field mapping in
+your index to use the `terms_set` query. This numeric field contains the
+number of matching terms required to return a document.
+
+To see how you can set up an index for the `terms_set` query, try the
+following example.
+
+. Create an index, `job-candidates`, with the following field mappings:
++
+--
+
+* `name`, a <<keyword,`keyword`>> field. This field contains the name of the
+job candidate.
+
+* `programming_languages`, a <<keyword,`keyword`>> field. This field contains
+programming languages known by the job candidate.
+
+* `required_matches`, a <<number,numeric>> `long` field. This field contains
+the number of matching terms required to return a document.

 [source,js]
--------------------------------------------------
-PUT /my-index
+----
+PUT /job-candidates
 {
   "mappings": {
     "properties": {
+      "name": {
+        "type": "keyword"
+      },
+      "programming_languages": {
+        "type": "keyword"
+      },
       "required_matches": {
         "type": "long"
       }
     }
   }
 }
-
-PUT /my-index/_doc/1?refresh
-{
-  "codes": ["ghi", "jkl"],
-  "required_matches": 2
-}
-
-PUT /my-index/_doc/2?refresh
-{
-  "codes": ["def", "ghi"],
-  "required_matches": 2
-}
--------------------------------------------------
+----
 // CONSOLE
 // TESTSETUP

-An example that uses the minimum should match field:
+--
+
+. Index a document with an ID of `1` and the following values:
++
+--
+
+* `Jane Smith` in the `name` field.
+
+* `["c++", "java"]` in the `programming_languages` field.
+
+* `2` in the `required_matches` field.
+
+Include the `?refresh` parameter so the document is immediately available for
+search.

 [source,js]
--------------------------------------------------
-GET /my-index/_search
+----
+PUT /job-candidates/_doc/1?refresh
+{
+  "name": "Jane Smith",
+  "programming_languages": ["c++", "java"],
+  "required_matches": 2
+}
+----
+// CONSOLE
+
+--
+
+. Index another document with an ID of `2` and the following values:
++
+--
+
+* `Jason Response` in the `name` field.
+
+* `["java", "php"]` in the `programming_languages` field.
+
+* `2` in the `required_matches` field.
+
+[source,js]
+----
+PUT /job-candidates/_doc/2?refresh
+{
+  "name": "Jason Response",
+  "programming_languages": ["java", "php"],
+  "required_matches": 2
+}
+----
+// CONSOLE
+
+--
+
+You can now use the `required_matches` field value as the number of
+matching terms required to return a document in the `terms_set` query.
+
+[[terms-set-query-ex-request-query]]
+===== Example query
+
+The following search returns documents where the `programming_languages` field
+contains at least two of the following terms:
+
+* `c++`
+* `java`
+* `php`
+
+The `minimum_should_match_field` is `required_matches`. This means the
+number of matching terms required is `2`, the value of the `required_matches`
+field.
+
+[source,js]
+----
+GET /job-candidates/_search
 {
   "query": {
     "terms_set": {
-      "codes" : {
-        "terms" : ["abc", "def", "ghi"],
+      "programming_languages": {
+        "terms": ["c++", "java", "php"],
         "minimum_should_match_field": "required_matches"
       }
     }
   }
 }
--------------------------------------------------
+----
 // CONSOLE

-Response:
+[[terms-set-top-level-params]]
+==== Top-level parameters for `terms_set`
+
+`<field>`::
+Field you wish to search.
+
+[[terms-set-field-params]]
+==== Parameters for `<field>`
+
+`terms`::
++
+--
+Array of terms you wish to find in the provided `<field>`.
To return a document,
+a required number of terms must exactly match the field values, including
+whitespace and capitalization.
+
+The required number of matching terms is defined in the
+`minimum_should_match_field` or `minimum_should_match_script` parameter.
+--
+
+`minimum_should_match_field`::
+<<number,Numeric>> field containing the number of matching terms
+required to return a document.
+
+`minimum_should_match_script`::
++
+--
+Custom script containing the number of matching terms required to return a
+document.
+
+For parameters and valid values, see <<modules-scripting-using,Scripting>>.
+
+For an example query using the `minimum_should_match_script` parameter, see
+<<terms-set-query-script-ex,Example query using `minimum_should_match_script`>>.
+--
+
+[[terms-set-query-notes]]
+==== Notes
+
+[[terms-set-query-script]]
+===== How to use the `minimum_should_match_script` parameter
+You can use `minimum_should_match_script` to define the required number of
+matching terms using a script. This is useful if you need to set the number of
+required terms dynamically.
+
+[[terms-set-query-script-ex]]
+====== Example query using `minimum_should_match_script`
+
+The following search returns documents where the `programming_languages` field
+contains at least two of the following terms:
+
+* `c++`
+* `java`
+* `php`
+
+The `source` parameter of this query indicates:
+
+* The required number of terms to match cannot exceed `params.num_terms`, the
+number of terms provided in the `terms` field.
+* The required number of terms to match is `2`, the value of the
+`required_matches` field.

 [source,js]
--------------------------------------------------
-{
-  "took": 13,
-  "timed_out": false,
-  "_shards": {
-    "total": 1,
-    "successful": 1,
-    "skipped" : 0,
-    "failed": 0
-  },
-  "hits": {
-    "total" : {
-      "value": 1,
-      "relation": "eq"
-    },
-    "max_score": 0.87546873,
-    "hits": [
-      {
-        "_index": "my-index",
-        "_type": "_doc",
-        "_id": "2",
-        "_score": 0.87546873,
-        "_source": {
-          "codes": ["def", "ghi"],
-          "required_matches": 2
-        }
-      }
-    ]
-  }
-}
--------------------------------------------------
-// TESTRESPONSE[s/"took": 13,/"took": "$body.took",/]
-
-Scripts can also be used to control how many terms are required to match
-in a more dynamic way. For example a create date or a popularity field
-can be used as basis for the number of required terms to match.
-
-Also the `params.num_terms` parameter is available in the script to indicate the
-number of terms that have been specified.
- -An example that always limits the number of required terms to match to never -become larger than the number of terms specified: - -[source,js] --------------------------------------------------- -GET /my-index/_search +---- +GET /job-candidates/_search { "query": { "terms_set": { - "codes" : { - "terms" : ["abc", "def", "ghi"], + "programming_languages": { + "terms": ["c++", "java", "php"], "minimum_should_match_script": { "source": "Math.min(params.num_terms, doc['required_matches'].value)" - } + }, + "boost": 1.0 } } } } --------------------------------------------------- -// CONSOLE +---- +// CONSOLE \ No newline at end of file From 9d5c66be41eea80683f23c5523f09134dbc09a78 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Fri, 28 Jun 2019 21:14:59 +0200 Subject: [PATCH 35/42] Migrate watcher hlrc response tests to use AbstractResponseTestCase (#43478) Relates to #43472 --- .../client/AbstractResponseTestCase.java | 6 +- .../watcher/ExecuteWatchResponseTests.java | 115 -------------- .../client/watcher/GetWatchResponseTests.java | 143 +++++++----------- .../hlrc/DeleteWatchResponseTests.java | 28 ++-- .../hlrc/ExecuteWatchResponseTests.java | 30 ++-- .../watcher/hlrc/PutWatchResponseTests.java | 32 ++-- 6 files changed, 93 insertions(+), 261 deletions(-) delete mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/ExecuteWatchResponseTests.java diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractResponseTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractResponseTestCase.java index 8565ca14a90..2a91a639a5a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractResponseTestCase.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractResponseTestCase.java @@ -45,7 +45,7 @@ public abstract class AbstractResponseTestCase extends final S serverTestInstance = createServerTestInstance(); final XContentType xContentType = randomFrom(XContentType.values()); - final BytesReference bytes = toShuffledXContent(serverTestInstance, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); + final BytesReference bytes = toShuffledXContent(serverTestInstance, xContentType, getParams(), randomBoolean()); final XContent xContent = XContentFactory.xContent(xContentType); final XContentParser parser = xContent.createParser( @@ -62,4 +62,8 @@ public abstract class AbstractResponseTestCase extends protected abstract void assertInstances(S serverTestInstance, C clientInstance); + protected ToXContent.Params getParams() { + return ToXContent.EMPTY_PARAMS; + } + } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/ExecuteWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/ExecuteWatchResponseTests.java deleted file mode 100644 index 3e0ef4c8a5e..00000000000 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/ExecuteWatchResponseTests.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client.watcher; - -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.ObjectPath; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.io.InputStream; -import java.util.List; -import java.util.Map; - -import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; -import static org.hamcrest.Matchers.is; - -public class ExecuteWatchResponseTests extends ESTestCase { - - public static final String WATCH_ID_VALUE = "my_watch"; - public static final String NODE_VALUE = "my_node"; - public static final String TRIGGER_TYPE_VALUE = "manual"; - public static final String STATE_VALUE = "executed"; - public static final String STATE_KEY = "state"; - public static final String TRIGGER_EVENT_KEY = "trigger_event"; - public static final String TRIGGER_EVENT_TYPE_KEY = "type"; - public static final String MESSAGES_KEY = "messages"; - public static final String NODE_KEY = "node"; - public static final String WATCH_ID_KEY = "watch_id"; - - public void testFromXContent() throws IOException { - xContentTester(this::createParser, - ExecuteWatchResponseTests::createTestInstance, - this::toXContent, - ExecuteWatchResponse::fromXContent) - .supportsUnknownFields(true) - .assertEqualsConsumer(this::assertEqualInstances) - .assertToXContentEquivalence(false) - .test(); - } - - private void assertEqualInstances(ExecuteWatchResponse expected, ExecuteWatchResponse actual) { - assertThat(expected.getRecordId(), is(actual.getRecordId())); - - // This may have extra json, so lets just assume that if all of the original fields from the creation are there, then its equal - // This is the same code that is in createTestInstance in this class. - Map actualMap = actual.getRecordAsMap(); - assertThat(ObjectPath.eval(WATCH_ID_KEY, actualMap), is(WATCH_ID_VALUE)); - assertThat(ObjectPath.eval(NODE_KEY, actualMap), is(NODE_VALUE)); - List messages = ObjectPath.eval(MESSAGES_KEY, actualMap); - assertThat(messages.size(), is(0)); - assertThat(ObjectPath.eval(TRIGGER_EVENT_KEY + "." 
+ TRIGGER_EVENT_TYPE_KEY, actualMap), is(TRIGGER_TYPE_VALUE)); - assertThat(ObjectPath.eval(STATE_KEY, actualMap), is(STATE_VALUE)); - } - - private XContentBuilder toXContent(BytesReference bytes, XContentBuilder builder) throws IOException { - // EMPTY is safe here because we never use namedObject - try (InputStream stream = bytes.streamInput(); - XContentParser parser = createParser(JsonXContent.jsonXContent, stream)) { - parser.nextToken(); - builder.generator().copyCurrentStructure(parser); - return builder; - } - } - - private XContentBuilder toXContent(ExecuteWatchResponse response, XContentBuilder builder) throws IOException { - builder.startObject(); - builder.field("_id", response.getRecordId()); - builder.field("watch_record"); - toXContent(response.getRecord(), builder); - return builder.endObject(); - } - - private static ExecuteWatchResponse createTestInstance() { - String id = "my_watch_0-2015-06-02T23:17:55.124Z"; - try { - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder.startObject(); - builder.field(WATCH_ID_KEY, WATCH_ID_VALUE); - builder.field(NODE_KEY, NODE_VALUE); - builder.startArray(MESSAGES_KEY); - builder.endArray(); - builder.startObject(TRIGGER_EVENT_KEY); - builder.field(TRIGGER_EVENT_TYPE_KEY, TRIGGER_TYPE_VALUE); - builder.endObject(); - builder.field(STATE_KEY, STATE_VALUE); - builder.endObject(); - BytesReference bytes = BytesReference.bytes(builder); - return new ExecuteWatchResponse(id, bytes); - } - catch (IOException e) { - throw new AssertionError(e); - } - } -} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/GetWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/GetWatchResponseTests.java index 9ab115371a0..c823da5acac 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/GetWatchResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/GetWatchResponseTests.java @@ -19,16 +19,12 @@ package org.elasticsearch.client.watcher; import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.DeprecationHandler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase; import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; @@ -36,7 +32,6 @@ import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchRespon import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; import java.io.IOException; -import java.io.InputStream; import java.time.Clock; import java.time.Instant; import java.time.ZoneOffset; @@ -44,65 +39,14 @@ import java.time.ZonedDateTime; import java.util.Collections; import java.util.HashMap; import java.util.Map; -import java.util.function.Predicate; -public class GetWatchResponseTests extends - AbstractHlrcStreamableXContentTestCase { +import static 
org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; - private static final String[] SHUFFLE_FIELDS_EXCEPTION = new String[] { "watch" }; +public class GetWatchResponseTests extends AbstractResponseTestCase { @Override - protected String[] getShuffleFieldsExceptions() { - return SHUFFLE_FIELDS_EXCEPTION; - } - - @Override - protected ToXContent.Params getToXContentParams() { - return new ToXContent.MapParams(Collections.singletonMap("hide_headers", "false")); - } - - @Override - protected Predicate getRandomFieldsExcludeFilter() { - return f -> f.contains("watch") || f.contains("actions") || f.contains("headers"); - } - - @Override - protected void assertEqualInstances(GetWatchResponse expectedInstance, GetWatchResponse newInstance) { - if (expectedInstance.isFound() && - expectedInstance.getSource().getContentType() != newInstance.getSource().getContentType()) { - /** - * The {@link GetWatchResponse#getContentType()} depends on the content type that - * was used to serialize the main object so we use the same content type than the - * expectedInstance to translate the watch of the newInstance. - */ - XContent from = XContentFactory.xContent(newInstance.getSource().getContentType()); - XContent to = XContentFactory.xContent(expectedInstance.getSource().getContentType()); - final BytesReference newSource; - // It is safe to use EMPTY here because this never uses namedObject - try (InputStream stream = newInstance.getSource().getBytes().streamInput(); - XContentParser parser = XContentFactory.xContent(from.type()).createParser(NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, stream)) { - parser.nextToken(); - XContentBuilder builder = XContentFactory.contentBuilder(to.type()); - builder.copyCurrentStructure(parser); - newSource = BytesReference.bytes(builder); - } catch (IOException e) { - throw new AssertionError(e); - } - newInstance = new GetWatchResponse(newInstance.getId(), newInstance.getVersion(), - newInstance.getSeqNo(), newInstance.getPrimaryTerm(), - newInstance.getStatus(), new XContentSource(newSource, expectedInstance.getSource().getContentType())); - } - super.assertEqualInstances(expectedInstance, newInstance); - } - - @Override - protected GetWatchResponse createBlankInstance() { - return new GetWatchResponse(); - } - - @Override - protected GetWatchResponse createTestInstance() { + protected GetWatchResponse createServerTestInstance() { String id = randomAlphaOfLength(10); if (LuceneTestCase.rarely()) { return new GetWatchResponse(id); @@ -115,6 +59,34 @@ public class GetWatchResponseTests extends return new GetWatchResponse(id, version, seqNo, primaryTerm, status, new XContentSource(source, XContentType.JSON)); } + @Override + protected org.elasticsearch.client.watcher.GetWatchResponse doParseToClientInstance(XContentParser parser) throws IOException { + return org.elasticsearch.client.watcher.GetWatchResponse.fromXContent(parser); + } + + @Override + protected void assertInstances(GetWatchResponse serverTestInstance, org.elasticsearch.client.watcher.GetWatchResponse clientInstance) { + assertThat(clientInstance.getId(), equalTo(serverTestInstance.getId())); + assertThat(clientInstance.getSeqNo(), equalTo(serverTestInstance.getSeqNo())); + assertThat(clientInstance.getPrimaryTerm(), equalTo(serverTestInstance.getPrimaryTerm())); + assertThat(clientInstance.getVersion(), equalTo(serverTestInstance.getVersion())); + if (serverTestInstance.getStatus() != null) { + 
assertThat(convertWatchStatus(clientInstance.getStatus()), equalTo(serverTestInstance.getStatus())); + } else { + assertThat(clientInstance.getStatus(), nullValue()); + } + if (serverTestInstance.getSource() != null) { + assertThat(clientInstance.getSourceAsMap(), equalTo(serverTestInstance.getSource().getAsMap())); + } else { + assertThat(clientInstance.getSource(), nullValue()); + } + } + + @Override + protected ToXContent.Params getParams() { + return new ToXContent.MapParams(Collections.singletonMap("hide_headers", "false")); + } + private static BytesReference simpleWatch() { try { XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent()); @@ -181,58 +153,45 @@ public class GetWatchResponseTests extends } } - @Override - public org.elasticsearch.client.watcher.GetWatchResponse doHlrcParseInstance(XContentParser parser) throws IOException { - return org.elasticsearch.client.watcher.GetWatchResponse.fromXContent(parser); - } - - @Override - public GetWatchResponse convertHlrcToInternal(org.elasticsearch.client.watcher.GetWatchResponse instance) { - if (instance.isFound()) { - return new GetWatchResponse(instance.getId(), instance.getVersion(), instance.getSeqNo(), instance.getPrimaryTerm(), - convertHlrcToInternal(instance.getStatus()), new XContentSource(instance.getSource(), instance.getContentType())); - } else { - return new GetWatchResponse(instance.getId()); - } - } - - private static WatchStatus convertHlrcToInternal(org.elasticsearch.client.watcher.WatchStatus status) { + private static WatchStatus convertWatchStatus(org.elasticsearch.client.watcher.WatchStatus status) { final Map actions = new HashMap<>(); for (Map.Entry entry : status.getActions().entrySet()) { - actions.put(entry.getKey(), convertHlrcToInternal(entry.getValue())); + actions.put(entry.getKey(), convertActionStatus(entry.getValue())); } return new WatchStatus(status.version(), - convertHlrcToInternal(status.state()), - status.getExecutionState() == null ? null : convertHlrcToInternal(status.getExecutionState()), + convertWatchStatusState(status.state()), + status.getExecutionState() == null ? null : convertWatchStatus(status.getExecutionState()), status.lastChecked(), status.lastMetCondition(), actions, status.getHeaders() ); } - private static ActionStatus convertHlrcToInternal(org.elasticsearch.client.watcher.ActionStatus actionStatus) { - return new ActionStatus(convertHlrcToInternal(actionStatus.ackStatus()), - actionStatus.lastExecution() == null ? null : convertHlrcToInternal(actionStatus.lastExecution()), - actionStatus.lastSuccessfulExecution() == null ? null : convertHlrcToInternal(actionStatus.lastSuccessfulExecution()), - actionStatus.lastThrottle() == null ? null : convertHlrcToInternal(actionStatus.lastThrottle()) + private static ActionStatus convertActionStatus(org.elasticsearch.client.watcher.ActionStatus actionStatus) { + return new ActionStatus(convertAckStatus(actionStatus.ackStatus()), + actionStatus.lastExecution() == null ? null : convertActionStatusExecution(actionStatus.lastExecution()), + actionStatus.lastSuccessfulExecution() == null ? null : convertActionStatusExecution(actionStatus.lastSuccessfulExecution()), + actionStatus.lastThrottle() == null ? 
null : convertActionStatusThrottle(actionStatus.lastThrottle()) ); } - private static ActionStatus.AckStatus convertHlrcToInternal(org.elasticsearch.client.watcher.ActionStatus.AckStatus ackStatus) { - return new ActionStatus.AckStatus(ackStatus.timestamp(), convertHlrcToInternal(ackStatus.state())); + private static ActionStatus.AckStatus convertAckStatus(org.elasticsearch.client.watcher.ActionStatus.AckStatus ackStatus) { + return new ActionStatus.AckStatus(ackStatus.timestamp(), convertAckStatusState(ackStatus.state())); } - private static ActionStatus.AckStatus.State convertHlrcToInternal(org.elasticsearch.client.watcher.ActionStatus.AckStatus.State state) { + private static ActionStatus.AckStatus.State convertAckStatusState( + org.elasticsearch.client.watcher.ActionStatus.AckStatus.State state) { return ActionStatus.AckStatus.State.valueOf(state.name()); } - private static WatchStatus.State convertHlrcToInternal(org.elasticsearch.client.watcher.WatchStatus.State state) { + private static WatchStatus.State convertWatchStatusState(org.elasticsearch.client.watcher.WatchStatus.State state) { return new WatchStatus.State(state.isActive(), state.getTimestamp()); } - private static ExecutionState convertHlrcToInternal(org.elasticsearch.client.watcher.ExecutionState executionState) { + private static ExecutionState convertWatchStatus(org.elasticsearch.client.watcher.ExecutionState executionState) { return ExecutionState.valueOf(executionState.name()); } - private static ActionStatus.Execution convertHlrcToInternal(org.elasticsearch.client.watcher.ActionStatus.Execution execution) { + private static ActionStatus.Execution convertActionStatusExecution( + org.elasticsearch.client.watcher.ActionStatus.Execution execution) { if (execution.successful()) { return ActionStatus.Execution.successful(execution.timestamp()); } else { @@ -240,7 +199,7 @@ public class GetWatchResponseTests extends } } - private static ActionStatus.Throttle convertHlrcToInternal(org.elasticsearch.client.watcher.ActionStatus.Throttle throttle) { + private static ActionStatus.Throttle convertActionStatusThrottle(org.elasticsearch.client.watcher.ActionStatus.Throttle throttle) { return new ActionStatus.Throttle(throttle.timestamp(), throttle.reason()); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/DeleteWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/DeleteWatchResponseTests.java index eebf2c9cef1..493375c4517 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/DeleteWatchResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/DeleteWatchResponseTests.java @@ -18,17 +18,19 @@ */ package org.elasticsearch.client.watcher.hlrc; +import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.client.watcher.DeleteWatchResponse; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.client.AbstractHlrcXContentTestCase; import java.io.IOException; -public class DeleteWatchResponseTests extends AbstractHlrcXContentTestCase< +import static org.hamcrest.Matchers.equalTo; + +public class DeleteWatchResponseTests extends AbstractResponseTestCase< org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse, DeleteWatchResponse> { @Override - protected org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse createTestInstance() { + protected org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse 
createServerTestInstance() { String id = randomAlphaOfLength(10); long version = randomLongBetween(1, 10); boolean found = randomBoolean(); @@ -36,23 +38,15 @@ public class DeleteWatchResponseTests extends AbstractHlrcXContentTestCase< } @Override - protected org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse doParseInstance(XContentParser parser) throws IOException { - return org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse.fromXContent(parser); - } - - @Override - public DeleteWatchResponse doHlrcParseInstance(XContentParser parser) throws IOException { + protected DeleteWatchResponse doParseToClientInstance(XContentParser parser) throws IOException { return DeleteWatchResponse.fromXContent(parser); } @Override - public org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse convertHlrcToInternal(DeleteWatchResponse instance) { - return new org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse(instance.getId(), instance.getVersion(), - instance.isFound()); - } - - @Override - protected boolean supportsUnknownFields() { - return false; + protected void assertInstances(org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse serverTestInstance, + DeleteWatchResponse clientInstance) { + assertThat(clientInstance.getId(), equalTo(serverTestInstance.getId())); + assertThat(clientInstance.getVersion(), equalTo(serverTestInstance.getVersion())); + assertThat(clientInstance.isFound(), equalTo(serverTestInstance.isFound())); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/ExecuteWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/ExecuteWatchResponseTests.java index ace75517a93..c1492eb5302 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/ExecuteWatchResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/ExecuteWatchResponseTests.java @@ -19,31 +19,23 @@ package org.elasticsearch.client.watcher.hlrc; +import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.client.AbstractHlrcXContentTestCase; import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse; import java.io.IOException; -public class ExecuteWatchResponseTests - extends AbstractHlrcXContentTestCase { +import static org.hamcrest.Matchers.equalTo; + +public class ExecuteWatchResponseTests extends AbstractResponseTestCase< + ExecuteWatchResponse, org.elasticsearch.client.watcher.ExecuteWatchResponse> { @Override - public org.elasticsearch.client.watcher.ExecuteWatchResponse doHlrcParseInstance(XContentParser parser) throws IOException { - return org.elasticsearch.client.watcher.ExecuteWatchResponse.fromXContent(parser); - } - - @Override - public ExecuteWatchResponse convertHlrcToInternal(org.elasticsearch.client.watcher.ExecuteWatchResponse instance) { - return new ExecuteWatchResponse(instance.getRecordId(), instance.getRecord(), XContentType.JSON); - } - - @Override - protected ExecuteWatchResponse createTestInstance() { + protected ExecuteWatchResponse createServerTestInstance() { String id = "my_watch_0-2015-06-02T23:17:55.124Z"; try { XContentBuilder builder = 
XContentFactory.jsonBuilder(); @@ -66,12 +58,14 @@ public class ExecuteWatchResponseTests } @Override - protected ExecuteWatchResponse doParseInstance(XContentParser parser) throws IOException { - return ExecuteWatchResponse.fromXContent(parser); + protected org.elasticsearch.client.watcher.ExecuteWatchResponse doParseToClientInstance(XContentParser parser) throws IOException { + return org.elasticsearch.client.watcher.ExecuteWatchResponse.fromXContent(parser); } @Override - protected boolean supportsUnknownFields() { - return false; + protected void assertInstances(ExecuteWatchResponse serverTestInstance, + org.elasticsearch.client.watcher.ExecuteWatchResponse clientInstance) { + assertThat(clientInstance.getRecordId(), equalTo(serverTestInstance.getRecordId())); + assertThat(clientInstance.getRecordAsMap(), equalTo(serverTestInstance.getRecordSource().getAsMap())); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/PutWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/PutWatchResponseTests.java index 9b65618cafc..a47de0d15fd 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/PutWatchResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/PutWatchResponseTests.java @@ -18,17 +18,19 @@ */ package org.elasticsearch.client.watcher.hlrc; +import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.client.watcher.PutWatchResponse; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.client.AbstractHlrcXContentTestCase; import java.io.IOException; -public class PutWatchResponseTests extends AbstractHlrcXContentTestCase< +import static org.hamcrest.Matchers.equalTo; + +public class PutWatchResponseTests extends AbstractResponseTestCase< org.elasticsearch.protocol.xpack.watcher.PutWatchResponse, PutWatchResponse> { @Override - protected org.elasticsearch.protocol.xpack.watcher.PutWatchResponse createTestInstance() { + protected org.elasticsearch.protocol.xpack.watcher.PutWatchResponse createServerTestInstance() { String id = randomAlphaOfLength(10); long seqNo = randomNonNegativeLong(); long primaryTerm = randomLongBetween(1, 20); @@ -38,23 +40,17 @@ public class PutWatchResponseTests extends AbstractHlrcXContentTestCase< } @Override - protected org.elasticsearch.protocol.xpack.watcher.PutWatchResponse doParseInstance(XContentParser parser) throws IOException { - return org.elasticsearch.protocol.xpack.watcher.PutWatchResponse.fromXContent(parser); + protected PutWatchResponse doParseToClientInstance(XContentParser parser) throws IOException { + return PutWatchResponse.fromXContent(parser); } @Override - public PutWatchResponse doHlrcParseInstance(XContentParser parser) throws IOException { - return org.elasticsearch.client.watcher.PutWatchResponse.fromXContent(parser); - } - - @Override - public org.elasticsearch.protocol.xpack.watcher.PutWatchResponse convertHlrcToInternal(PutWatchResponse instance) { - return new org.elasticsearch.protocol.xpack.watcher.PutWatchResponse(instance.getId(), instance.getVersion(), - instance.getSeqNo(), instance.getPrimaryTerm(), instance.isCreated()); - } - - @Override - protected boolean supportsUnknownFields() { - return false; + protected void assertInstances(org.elasticsearch.protocol.xpack.watcher.PutWatchResponse serverTestInstance, + PutWatchResponse clientInstance) { + assertThat(clientInstance.getId(), 
equalTo(serverTestInstance.getId()));
+        assertThat(clientInstance.getSeqNo(), equalTo(serverTestInstance.getSeqNo()));
+        assertThat(clientInstance.getPrimaryTerm(), equalTo(serverTestInstance.getPrimaryTerm()));
+        assertThat(clientInstance.getVersion(), equalTo(serverTestInstance.getVersion()));
+        assertThat(clientInstance.isCreated(), equalTo(serverTestInstance.isCreated()));
     }
 }

From 7951c63b919d97a1a64e8f8abd7f64a8a14c49c9 Mon Sep 17 00:00:00 2001
From: David Roberts
Date: Fri, 28 Jun 2019 20:06:22 +0100
Subject: [PATCH 36/42] [ML] Mark ml-cpp dependency as regularly changing
 (#43760)

Since #41817 was merged the ml-cpp zip file for any given version has
been cached indefinitely by Gradle. This is problematic, particularly in
the case of the master branch where the version 8.0.0-SNAPSHOT will be
in use for more than a year.

This change tells Gradle that the ml-cpp zip file is a "changing"
dependency, and to check whether it has changed every two hours. Two
hours is a compromise between checking on every build, which would annoy
developers with slow internet connections, and checking rarely, which
would cause bug fixes in the ml-cpp code to take a long time to
propagate through to elasticsearch PRs that rely on them.

---
 x-pack/plugin/ml/build.gradle | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/x-pack/plugin/ml/build.gradle b/x-pack/plugin/ml/build.gradle
index 660a09c6b94..82c7b138ec9 100644
--- a/x-pack/plugin/ml/build.gradle
+++ b/x-pack/plugin/ml/build.gradle
@@ -31,6 +31,7 @@ configurations {
       substitute module("org.elasticsearch.ml:ml-cpp") with project(":ml-cpp")
     }
   }
+  resolutionStrategy.cacheChangingModulesFor 2, 'hours'
 }
}

@@ -59,7 +60,9 @@ dependencies {
   compile project(':libs:elasticsearch-grok')
   compile "com.ibm.icu:icu4j:${versions.icu4j}"
   compile "net.sf.supercsv:super-csv:${versions.supercsv}"
-  nativeBundle "org.elasticsearch.ml:ml-cpp:${project.version}@zip"
+  nativeBundle("org.elasticsearch.ml:ml-cpp:${project.version}@zip") {
+    changing = true
+  }
   testCompile 'org.ini4j:ini4j:0.5.2'
}

From 28ab77a023cfea977ba83ae089a4061f7bb34621 Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Fri, 28 Jun 2019 21:40:00 -0700
Subject: [PATCH 37/42] Add StreamableResponseAction to aid in deprecation of
 Streamable (#43770)

The Action base class currently works for both Streamable and Writeable
response types. This commit introduces StreamableResponseAction, which
only the legacy Action implementations that provide newResponse() will
extend. This eliminates the need for overriding newResponse() with an
UnsupportedOperationException.
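As a rough sketch of the two resulting code paths (the action and response class names below are hypothetical, chosen only to illustrate the pattern; the real migrations are in the diff that follows):

[source,java]
----
// Legacy path: the response type still implements Streamable, so the
// action extends StreamableResponseAction and keeps newResponse().
public class LegacyExampleAction extends StreamableResponseAction<ExampleResponse> {
    public static final LegacyExampleAction INSTANCE = new LegacyExampleAction();

    private LegacyExampleAction() {
        super("cluster:admin/example/legacy");
    }

    @Override
    public ExampleResponse newResponse() {
        return new ExampleResponse();
    }
}

// Migrated path: the response type implements Writeable, so the action
// extends Action directly and supplies a reader instead of newResponse().
public class ExampleAction extends Action<ExampleResponse> {
    public static final ExampleAction INSTANCE = new ExampleAction();

    private ExampleAction() {
        super("cluster:admin/example");
    }

    @Override
    public Writeable.Reader<ExampleResponse> getResponseReader() {
        return ExampleResponse::new;
    }
}
----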
relates #34389 --- .../noop/action/bulk/NoopBulkAction.java | 4 +- .../noop/action/search/NoopSearchAction.java | 5 -- .../ingest/common/GrokProcessorGetAction.java | 4 +- .../mustache/MultiSearchTemplateAction.java | 5 -- .../script/mustache/SearchTemplateAction.java | 5 -- .../action/PainlessContextAction.java | 5 -- .../action/PainlessExecuteAction.java | 4 +- .../index/rankeval/RankEvalAction.java | 4 +- .../index/reindex/RethrottleAction.java | 5 -- .../java/org/elasticsearch/action/Action.java | 28 +++++----- .../action/StreamableResponseAction.java | 51 +++++++++++++++++++ .../ClusterAllocationExplainAction.java | 4 +- .../AddVotingConfigExclusionsAction.java | 5 -- .../ClearVotingConfigExclusionsAction.java | 5 -- .../cluster/health/ClusterHealthAction.java | 4 +- .../hotthreads/NodesHotThreadsAction.java | 4 +- .../cluster/node/info/NodesInfoAction.java | 4 +- .../NodesReloadSecureSettingsAction.java | 4 +- .../cluster/node/stats/NodesStatsAction.java | 4 +- .../node/tasks/cancel/CancelTasksAction.java | 5 -- .../cluster/node/tasks/get/GetTaskAction.java | 4 +- .../node/tasks/list/ListTasksAction.java | 5 -- .../cluster/node/usage/NodesUsageAction.java | 4 +- .../cluster/remote/RemoteInfoAction.java | 4 +- .../delete/DeleteRepositoryAction.java | 5 -- .../get/GetRepositoriesAction.java | 4 +- .../repositories/put/PutRepositoryAction.java | 5 -- .../verify/VerifyRepositoryAction.java | 4 +- .../cluster/reroute/ClusterRerouteAction.java | 5 -- .../settings/ClusterUpdateSettingsAction.java | 5 -- .../shards/ClusterSearchShardsAction.java | 5 -- .../create/CreateSnapshotAction.java | 4 +- .../delete/DeleteSnapshotAction.java | 5 -- .../snapshots/get/GetSnapshotsAction.java | 4 +- .../restore/RestoreSnapshotAction.java | 4 +- .../status/SnapshotsStatusAction.java | 4 +- .../cluster/state/ClusterStateAction.java | 4 +- .../cluster/stats/ClusterStatsAction.java | 4 +- .../DeleteStoredScriptAction.java | 5 -- .../storedscripts/GetStoredScriptAction.java | 4 +- .../storedscripts/PutStoredScriptAction.java | 5 -- .../tasks/PendingClusterTasksAction.java | 4 +- .../indices/alias/IndicesAliasesAction.java | 5 -- .../alias/exists/AliasesExistAction.java | 4 +- .../indices/alias/get/GetAliasesAction.java | 4 +- .../admin/indices/analyze/AnalyzeAction.java | 5 -- .../cache/clear/ClearIndicesCacheAction.java | 4 +- .../admin/indices/close/CloseIndexAction.java | 5 -- .../indices/create/CreateIndexAction.java | 5 -- .../indices/delete/DeleteIndexAction.java | 5 -- .../exists/indices/IndicesExistsAction.java | 4 +- .../exists/types/TypesExistsAction.java | 4 +- .../admin/indices/flush/FlushAction.java | 4 +- .../indices/flush/SyncedFlushAction.java | 4 +- .../indices/forcemerge/ForceMergeAction.java | 4 +- .../admin/indices/get/GetIndexAction.java | 4 +- .../mapping/get/GetFieldMappingsAction.java | 5 -- .../mapping/get/GetMappingsAction.java | 4 +- .../indices/mapping/put/PutMappingAction.java | 5 -- .../admin/indices/open/OpenIndexAction.java | 5 -- .../indices/recovery/RecoveryAction.java | 4 +- .../admin/indices/refresh/RefreshAction.java | 4 +- .../indices/rollover/RolloverAction.java | 5 -- .../segments/IndicesSegmentsAction.java | 4 +- .../settings/get/GetSettingsAction.java | 4 +- .../settings/put/UpdateSettingsAction.java | 5 -- .../shards/IndicesShardStoresAction.java | 4 +- .../admin/indices/shrink/ResizeAction.java | 5 -- .../admin/indices/shrink/ShrinkAction.java | 5 -- .../indices/stats/IndicesStatsAction.java | 4 +- .../delete/DeleteIndexTemplateAction.java | 5 -- 
.../template/get/GetIndexTemplatesAction.java | 4 +- .../template/put/PutIndexTemplateAction.java | 5 -- .../upgrade/get/UpgradeStatusAction.java | 4 +- .../indices/upgrade/post/UpgradeAction.java | 4 +- .../upgrade/post/UpgradeSettingsAction.java | 5 -- .../validate/query/ValidateQueryAction.java | 4 +- .../elasticsearch/action/bulk/BulkAction.java | 4 +- .../action/delete/DeleteAction.java | 4 +- .../action/explain/ExplainAction.java | 5 -- .../fieldcaps/FieldCapabilitiesAction.java | 4 +- .../elasticsearch/action/get/GetAction.java | 5 -- .../action/get/MultiGetAction.java | 4 +- .../action/index/IndexAction.java | 4 +- .../action/ingest/DeletePipelineAction.java | 5 -- .../action/ingest/GetPipelineAction.java | 4 +- .../action/ingest/PutPipelineAction.java | 5 -- .../action/ingest/SimulatePipelineAction.java | 4 +- .../elasticsearch/action/main/MainAction.java | 4 +- .../action/search/ClearScrollAction.java | 4 +- .../action/search/MultiSearchAction.java | 5 -- .../action/search/SearchAction.java | 5 -- .../action/search/SearchScrollAction.java | 5 -- .../termvectors/MultiTermVectorsAction.java | 4 +- .../action/termvectors/TermVectorsAction.java | 5 -- .../action/update/UpdateAction.java | 4 +- .../index/reindex/DeleteByQueryAction.java | 4 +- .../index/reindex/ReindexAction.java | 4 +- .../index/reindex/UpdateByQueryAction.java | 4 +- .../index/seqno/RetentionLeaseActions.java | 8 +-- .../CompletionPersistentTaskAction.java | 4 +- .../RemovePersistentTaskAction.java | 4 +- .../persistent/StartPersistentTaskAction.java | 4 +- .../UpdatePersistentTaskStatusAction.java | 4 +- .../action/ActionModuleTests.java | 2 +- .../org/elasticsearch/action/ActionTests.java | 2 +- .../cluster/node/tasks/TestTaskPlugin.java | 8 +-- .../InternalOrPrivateSettingsPlugin.java | 4 +- .../persistent/TestPersistentTasksPlugin.java | 5 -- .../xpack/ccr/action/ShardChangesAction.java | 5 -- .../bulk/BulkShardOperationsAction.java | 4 +- .../ClearCcrRestoreSessionAction.java | 5 -- .../DeleteInternalCcrRepositoryAction.java | 5 -- .../GetCcrRestoreFileChunkAction.java | 5 -- .../PutCcrRestoreSessionAction.java | 5 -- .../PutInternalCcrRepositoryAction.java | 5 -- .../license/DeleteLicenseAction.java | 5 -- .../license/GetBasicStatusAction.java | 4 +- .../license/GetLicenseAction.java | 4 +- .../license/GetTrialStatusAction.java | 4 +- .../license/PostStartBasicAction.java | 5 -- .../license/PostStartTrialAction.java | 4 +- .../license/PutLicenseAction.java | 5 -- .../core/action/ReloadAnalyzerAction.java | 4 +- .../action/TransportFreezeIndexAction.java | 5 -- .../xpack/core/action/XPackInfoAction.java | 4 +- .../xpack/core/action/XPackUsageAction.java | 4 +- .../xpack/core/ccr/action/CcrStatsAction.java | 5 -- .../action/DeleteAutoFollowPatternAction.java | 5 -- .../core/ccr/action/FollowInfoAction.java | 5 -- .../core/ccr/action/FollowStatsAction.java | 5 -- .../core/ccr/action/ForgetFollowerAction.java | 4 +- .../action/GetAutoFollowPatternAction.java | 5 -- .../core/ccr/action/PauseFollowAction.java | 5 -- .../action/PutAutoFollowPatternAction.java | 5 -- .../core/ccr/action/PutFollowAction.java | 5 -- .../core/ccr/action/ResumeFollowAction.java | 5 -- .../xpack/core/ccr/action/UnfollowAction.java | 5 -- .../DeleteDataFrameTransformAction.java | 5 -- .../action/GetDataFrameTransformsAction.java | 5 -- .../GetDataFrameTransformsStatsAction.java | 5 -- .../PreviewDataFrameTransformAction.java | 5 -- .../action/PutDataFrameTransformAction.java | 5 -- .../action/StartDataFrameTransformAction.java | 5 -- 
.../StartDataFrameTransformTaskAction.java | 5 -- .../action/StopDataFrameTransformAction.java | 5 -- .../deprecation/DeprecationInfoAction.java | 4 +- .../NodesDeprecationCheckAction.java | 3 +- .../core/graph/action/GraphExploreAction.java | 4 +- .../action/DeleteLifecycleAction.java | 5 -- .../action/ExplainLifecycleAction.java | 4 +- .../action/GetLifecycleAction.java | 4 +- .../action/GetStatusAction.java | 4 +- .../action/MoveToStepAction.java | 5 -- .../action/PutLifecycleAction.java | 5 -- .../RemoveIndexLifecyclePolicyAction.java | 4 +- .../indexlifecycle/action/RetryAction.java | 5 -- .../indexlifecycle/action/StartILMAction.java | 5 -- .../indexlifecycle/action/StopILMAction.java | 5 -- .../xpack/core/ml/action/CloseJobAction.java | 5 -- .../core/ml/action/DeleteCalendarAction.java | 5 -- .../ml/action/DeleteCalendarEventAction.java | 5 -- .../DeleteDataFrameAnalyticsAction.java | 5 -- .../core/ml/action/DeleteDatafeedAction.java | 5 -- .../ml/action/DeleteExpiredDataAction.java | 4 +- .../core/ml/action/DeleteFilterAction.java | 5 -- .../core/ml/action/DeleteForecastAction.java | 5 -- .../xpack/core/ml/action/DeleteJobAction.java | 5 -- .../ml/action/DeleteModelSnapshotAction.java | 5 -- .../ml/action/EvaluateDataFrameAction.java | 4 +- .../ml/action/FinalizeJobExecutionAction.java | 5 -- .../ml/action/FindFileStructureAction.java | 4 +- .../xpack/core/ml/action/FlushJobAction.java | 5 -- .../core/ml/action/ForecastJobAction.java | 5 -- .../core/ml/action/GetBucketsAction.java | 4 +- .../ml/action/GetCalendarEventsAction.java | 4 +- .../core/ml/action/GetCalendarsAction.java | 4 +- .../core/ml/action/GetCategoriesAction.java | 4 +- .../action/GetDataFrameAnalyticsAction.java | 4 +- .../GetDataFrameAnalyticsStatsAction.java | 5 -- .../core/ml/action/GetDatafeedsAction.java | 4 +- .../ml/action/GetDatafeedsStatsAction.java | 4 +- .../core/ml/action/GetFiltersAction.java | 5 +- .../core/ml/action/GetInfluencersAction.java | 5 +- .../xpack/core/ml/action/GetJobsAction.java | 4 +- .../core/ml/action/GetJobsStatsAction.java | 5 -- .../ml/action/GetModelSnapshotsAction.java | 4 +- .../ml/action/GetOverallBucketsAction.java | 4 +- .../core/ml/action/GetRecordsAction.java | 4 +- .../core/ml/action/IsolateDatafeedAction.java | 5 -- .../core/ml/action/KillProcessAction.java | 5 -- .../xpack/core/ml/action/MlInfoAction.java | 4 +- .../xpack/core/ml/action/OpenJobAction.java | 5 -- .../core/ml/action/PersistJobAction.java | 5 -- .../ml/action/PostCalendarEventsAction.java | 4 +- .../xpack/core/ml/action/PostDataAction.java | 5 -- .../core/ml/action/PreviewDatafeedAction.java | 4 +- .../core/ml/action/PutCalendarAction.java | 4 +- .../action/PutDataFrameAnalyticsAction.java | 4 +- .../core/ml/action/PutDatafeedAction.java | 4 +- .../xpack/core/ml/action/PutFilterAction.java | 4 +- .../xpack/core/ml/action/PutJobAction.java | 4 +- .../ml/action/RevertModelSnapshotAction.java | 4 +- .../core/ml/action/SetUpgradeModeAction.java | 5 -- .../action/StartDataFrameAnalyticsAction.java | 5 -- .../core/ml/action/StartDatafeedAction.java | 5 -- .../action/StopDataFrameAnalyticsAction.java | 5 -- .../core/ml/action/StopDatafeedAction.java | 5 -- .../ml/action/UpdateCalendarJobAction.java | 4 +- .../core/ml/action/UpdateDatafeedAction.java | 4 +- .../core/ml/action/UpdateFilterAction.java | 4 +- .../xpack/core/ml/action/UpdateJobAction.java | 4 +- .../ml/action/UpdateModelSnapshotAction.java | 4 +- .../core/ml/action/UpdateProcessAction.java | 5 -- .../ml/action/ValidateDetectorAction.java | 5 -- 
.../ml/action/ValidateJobConfigAction.java | 5 -- .../action/MonitoringBulkAction.java | 4 +- .../rollup/action/DeleteRollupJobAction.java | 5 -- .../rollup/action/GetRollupCapsAction.java | 5 +- .../action/GetRollupIndexCapsAction.java | 5 +- .../rollup/action/GetRollupJobsAction.java | 5 -- .../rollup/action/PutRollupJobAction.java | 5 -- .../rollup/action/RollupSearchAction.java | 5 -- .../rollup/action/StartRollupJobAction.java | 5 -- .../rollup/action/StopRollupJobAction.java | 5 -- .../security/action/CreateApiKeyAction.java | 5 -- .../core/security/action/GetApiKeyAction.java | 5 -- .../action/InvalidateApiKeyAction.java | 5 -- .../oidc/OpenIdConnectAuthenticateAction.java | 5 -- .../oidc/OpenIdConnectLogoutAction.java | 5 -- ...nIdConnectPrepareAuthenticationAction.java | 5 -- .../privilege/DeletePrivilegesAction.java | 4 +- .../action/privilege/GetPrivilegesAction.java | 4 +- .../action/privilege/PutPrivilegesAction.java | 4 +- .../action/realm/ClearRealmCacheAction.java | 4 +- .../action/role/ClearRolesCacheAction.java | 4 +- .../action/role/DeleteRoleAction.java | 4 +- .../security/action/role/GetRolesAction.java | 4 +- .../security/action/role/PutRoleAction.java | 4 +- .../rolemapping/DeleteRoleMappingAction.java | 4 +- .../rolemapping/GetRoleMappingsAction.java | 4 +- .../rolemapping/PutRoleMappingAction.java | 4 +- .../action/saml/SamlAuthenticateAction.java | 4 +- .../saml/SamlInvalidateSessionAction.java | 4 +- .../action/saml/SamlLogoutAction.java | 4 +- .../saml/SamlPrepareAuthenticationAction.java | 4 +- .../action/token/CreateTokenAction.java | 4 +- .../action/token/InvalidateTokenAction.java | 4 +- .../action/token/RefreshTokenAction.java | 6 +-- .../action/user/AuthenticateAction.java | 4 +- .../action/user/ChangePasswordAction.java | 4 +- .../action/user/DeleteUserAction.java | 4 +- .../action/user/GetUserPrivilegesAction.java | 4 +- .../security/action/user/GetUsersAction.java | 4 +- .../action/user/HasPrivilegesAction.java | 4 +- .../security/action/user/PutUserAction.java | 4 +- .../action/user/SetEnabledAction.java | 4 +- .../ssl/action/GetCertificateInfoAction.java | 4 +- .../upgrade/actions/IndexUpgradeAction.java | 4 +- .../actions/IndexUpgradeInfoAction.java | 4 +- .../transport/actions/ack/AckWatchAction.java | 4 +- .../actions/activate/ActivateWatchAction.java | 4 +- .../actions/delete/DeleteWatchAction.java | 4 +- .../actions/execute/ExecuteWatchAction.java | 4 +- .../transport/actions/get/GetWatchAction.java | 4 +- .../transport/actions/put/PutWatchAction.java | 4 +- .../actions/service/WatcherServiceAction.java | 5 -- .../actions/stats/WatcherStatsAction.java | 4 +- .../ml/job/persistence/MockClientBuilder.java | 3 +- .../sql/action/SqlClearCursorAction.java | 4 +- .../xpack/sql/action/SqlQueryAction.java | 4 +- .../xpack/sql/action/SqlTranslateAction.java | 4 +- .../xpack/sql/plugin/SqlStatsAction.java | 4 +- 273 files changed, 378 insertions(+), 908 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/action/StreamableResponseAction.java diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java index 2bfd3b0cc8e..40c65aee700 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java +++ 
b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java @@ -18,10 +18,10 @@ */ package org.elasticsearch.plugin.noop.action.bulk; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.action.bulk.BulkResponse; -public class NoopBulkAction extends Action { +public class NoopBulkAction extends StreamableResponseAction { public static final String NAME = "mock:data/write/bulk"; public static final NoopBulkAction INSTANCE = new NoopBulkAction(); diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java index 9b390e1ffdd..aa316ae435a 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java @@ -30,11 +30,6 @@ public class NoopSearchAction extends Action { super(NAME); } - @Override - public SearchResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return SearchResponse::new; diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java index 1141a4cf7e8..ee2f49390b8 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java @@ -18,11 +18,11 @@ */ package org.elasticsearch.ingest.common; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.node.NodeClient; @@ -45,7 +45,7 @@ import java.util.Map; import static org.elasticsearch.ingest.common.IngestCommonPlugin.GROK_PATTERNS; import static org.elasticsearch.rest.RestRequest.Method.GET; -public class GrokProcessorGetAction extends Action { +public class GrokProcessorGetAction extends StreamableResponseAction { static final GrokProcessorGetAction INSTANCE = new GrokProcessorGetAction(); static final String NAME = "cluster:admin/ingest/processor/grok/get"; diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java index a9a44d04715..573c5888991 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java @@ -31,11 +31,6 @@ public class MultiSearchTemplateAction extends Action getResponseReader() { return MultiSearchTemplateResponse::new; diff --git 
diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java
index 5d905ec39e1..7bd57154e26 100644
--- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java
+++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java
@@ -31,11 +31,6 @@ public class SearchTemplateAction extends Action<SearchTemplateResponse> {
         super(NAME);
     }
 
-    @Override
-    public SearchTemplateResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<SearchTemplateResponse> getResponseReader() {
         return SearchTemplateResponse::new;
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java
index c947de1fd82..4abad4d78af 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java
@@ -75,11 +75,6 @@ public class PainlessContextAction extends Action<PainlessContextAction.Response> {
         super(NAME);
     }
 
-    @Override
-    public Response newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<Response> getResponseReader() {
         return Response::new;
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java
index ebcc5647d9b..bc34c90ad52 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java
@@ -102,8 +102,8 @@ public class PainlessExecuteAction extends Action<PainlessExecuteAction.Response> {
     }
 
     @Override
-    public Response newResponse() {
-        return new Response();
+    public Writeable.Reader<Response> getResponseReader() {
+        return Response::new;
     }
 
     public static class Request extends SingleShardRequest<Request> implements ToXContentObject {
diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalAction.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalAction.java
index 54e89fe0e98..664377786f8 100644
--- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalAction.java
+++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalAction.java
@@ -19,12 +19,12 @@
 
 package org.elasticsearch.index.rankeval;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
 /**
  * Action for explaining evaluating search ranking results.
  */
-public class RankEvalAction extends Action<RankEvalResponse> {
+public class RankEvalAction extends StreamableResponseAction<RankEvalResponse> {
 
     public static final RankEvalAction INSTANCE = new RankEvalAction();
     public static final String NAME = "indices:data/read/rank_eval";
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleAction.java
index 3cb2c60c623..513b4261bdf 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleAction.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleAction.java
@@ -31,11 +31,6 @@ public class RethrottleAction extends Action<ListTasksResponse> {
         super(NAME);
     }
 
-    @Override
-    public ListTasksResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<ListTasksResponse> getResponseReader() {
         return ListTasksResponse::new;
diff --git a/server/src/main/java/org/elasticsearch/action/Action.java b/server/src/main/java/org/elasticsearch/action/Action.java
index f0df6202072..0037533797d 100644
--- a/server/src/main/java/org/elasticsearch/action/Action.java
+++ b/server/src/main/java/org/elasticsearch/action/Action.java
@@ -26,15 +26,27 @@ import org.elasticsearch.transport.TransportRequestOptions;
 /**
  * A generic action. Should strive to make it a singleton.
  */
-public abstract class Action<Response extends ActionResponse> {
+public class Action<Response extends ActionResponse> {
 
     private final String name;
+    private final Writeable.Reader<Response> responseReader;
 
     /**
      * @param name The name of the action, must be unique across actions.
+     * @deprecated Pass a {@link Writeable.Reader} with {@link #Action(String, Writeable.Reader)} instead.
      */
+    @Deprecated
     protected Action(String name) {
+        this(name, null);
+    }
+
+    /**
+     * @param name The name of the action, must be unique across actions.
+     * @param responseReader A reader for the response type
+     */
+    public Action(String name, Writeable.Reader<Response> responseReader) {
         this.name = name;
+        this.responseReader = responseReader;
     }
 
     /**
@@ -44,23 +56,11 @@ public abstract class Action<Response extends ActionResponse> {
         return this.name;
     }
 
-    /**
-     * Creates a new response instance.
-     * @deprecated Implement {@link #getResponseReader()} instead and make this method throw an
-     * {@link UnsupportedOperationException}
-     */
-    @Deprecated
-    public abstract Response newResponse();
-
     /**
      * Get a reader that can create a new instance of the class from a {@link org.elasticsearch.common.io.stream.StreamInput}
      */
     public Writeable.Reader<Response> getResponseReader() {
-        return in -> {
-            Response response = newResponse();
-            response.readFrom(in);
-            return response;
-        };
+        return responseReader;
     }
 
     /**
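With the reader now stored on `Action` itself, the intended end state is that an action passes its reader up through the new constructor and overrides nothing. A sketch of that destination, with hypothetical names (the files in this patch mostly still call `super(NAME)` and override `getResponseReader()` instead):

    // Hypothetical action using the new Action(String, Writeable.Reader) constructor.
    public class FooAction extends Action<FooResponse> {

        public static final FooAction INSTANCE = new FooAction();
        public static final String NAME = "cluster:monitor/foo";

        private FooAction() {
            super(NAME, FooResponse::new); // served back verbatim by getResponseReader()
        }
    }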
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.elasticsearch.common.io.stream.Writeable;
+
+/**
+ * An action whose response type implements {@link org.elasticsearch.common.io.stream.Streamable}.
+ * @deprecated Use {@link Action} directly and provide a {@link Writeable.Reader}
+ */
+@Deprecated
+public abstract class StreamableResponseAction<Response extends ActionResponse> extends Action<Response> {
+
+    protected StreamableResponseAction(String name) {
+        super(name);
+    }
+
+    /**
+     * Creates a new response instance.
+     * @deprecated Implement {@link #getResponseReader()} instead and make this method throw an
+     * {@link UnsupportedOperationException}
+     */
+    @Deprecated
+    public abstract Response newResponse();
+
+    @Override
+    public final Writeable.Reader<Response> getResponseReader() {
+        return in -> {
+            Response response = newResponse();
+            response.readFrom(in);
+            return response;
+        };
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java
index 19d5378b305..b4b348ae97e 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java
@@ -19,12 +19,12 @@
 
 package org.elasticsearch.action.admin.cluster.allocation;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
 /**
  * Action for explaining shard allocation for a shard in the cluster
 */
-public class ClusterAllocationExplainAction extends Action<ClusterAllocationExplainResponse> {
+public class ClusterAllocationExplainAction extends StreamableResponseAction<ClusterAllocationExplainResponse> {
 
     public static final ClusterAllocationExplainAction INSTANCE = new ClusterAllocationExplainAction();
     public static final String NAME = "cluster:monitor/allocation/explain";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java
index 6fdd324fa47..20f1e3c5044 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java
@@ -29,11 +29,6 @@ public class AddVotingConfigExclusionsAction extends Action<AddVotingConfigExclusionsResponse> {
         super(NAME);
     }
 
-    @Override
-    public AddVotingConfigExclusionsResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AddVotingConfigExclusionsResponse> getResponseReader() {
         return AddVotingConfigExclusionsResponse::new;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java
index 49b578f48ad..6cafcb7653f 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java
@@ -29,11 +29,6 @@ public class ClearVotingConfigExclusionsAction extends Action<ClearVotingConfigExclusionsResponse> {
         super(NAME);
     }
 
-    @Override
-    public ClearVotingConfigExclusionsResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<ClearVotingConfigExclusionsResponse> getResponseReader() {
         return ClearVotingConfigExclusionsResponse::new;
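For actions whose responses still implement `Streamable`, the new base class keeps the old `newResponse()` contract and adapts it: its final `getResponseReader()` allocates a blank response and fills it via `readFrom`. A sketch of a legacy action under this shim, again with hypothetical names:

    // Hypothetical Streamable-era action; the deprecated base class bridges it
    // into the reader-based world until its response is made Writeable.
    public class LegacyFooAction extends StreamableResponseAction<LegacyFooResponse> {

        public static final LegacyFooAction INSTANCE = new LegacyFooAction();
        public static final String NAME = "cluster:monitor/legacy_foo";

        private LegacyFooAction() {
            super(NAME);
        }

        @Override
        public LegacyFooResponse newResponse() {
            return new LegacyFooResponse(); // populated later by readFrom(StreamInput)
        }
    }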
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java
index 0cd148ee231..ceb2a145fb6 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.cluster.health;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class ClusterHealthAction extends Action<ClusterHealthResponse> {
+public class ClusterHealthAction extends StreamableResponseAction<ClusterHealthResponse> {
 
     public static final ClusterHealthAction INSTANCE = new ClusterHealthAction();
     public static final String NAME = "cluster:monitor/health";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java
index 4ea7ee5bc3b..317fa984163 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.cluster.node.hotthreads;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class NodesHotThreadsAction extends Action<NodesHotThreadsResponse> {
+public class NodesHotThreadsAction extends StreamableResponseAction<NodesHotThreadsResponse> {
 
     public static final NodesHotThreadsAction INSTANCE = new NodesHotThreadsAction();
     public static final String NAME = "cluster:monitor/nodes/hot_threads";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java
index edc5ed7e83f..b860f07c8ff 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.cluster.node.info;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class NodesInfoAction extends Action<NodesInfoResponse> {
+public class NodesInfoAction extends StreamableResponseAction<NodesInfoResponse> {
 
     public static final NodesInfoAction INSTANCE = new NodesInfoAction();
     public static final String NAME = "cluster:monitor/nodes/info";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java
index 19e8fc1929c..0a0c8a74fe9 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java
@@ -19,10 +19,10 @@
 
 package org.elasticsearch.action.admin.cluster.node.reload;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
 public class NodesReloadSecureSettingsAction
-        extends Action<NodesReloadSecureSettingsResponse> {
+        extends StreamableResponseAction<NodesReloadSecureSettingsResponse> {
 
     public static final NodesReloadSecureSettingsAction INSTANCE = new NodesReloadSecureSettingsAction();
     public static final String NAME = "cluster:admin/nodes/reload_secure_settings";
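None of these conversions change how actions are invoked; callers still go through the action singleton. A sketch of an unchanged call site, assuming a `client` (e.g. a `NodeClient`) and a `logger` are in scope:

    ClusterHealthRequest request = new ClusterHealthRequest("my-index");
    client.execute(ClusterHealthAction.INSTANCE, request, ActionListener.wrap(
        response -> logger.info("cluster status [{}]", response.getStatus()),
        e -> logger.error("health check failed", e)));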
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java
index bc8c81ef1e0..1febe1b4872 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.cluster.node.stats;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class NodesStatsAction extends Action<NodesStatsResponse> {
+public class NodesStatsAction extends StreamableResponseAction<NodesStatsResponse> {
 
     public static final NodesStatsAction INSTANCE = new NodesStatsAction();
     public static final String NAME = "cluster:monitor/nodes/stats";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java
index 23cb69cf807..39532d18519 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java
@@ -34,11 +34,6 @@ public class CancelTasksAction extends Action<CancelTasksResponse> {
         super(NAME);
     }
 
-    @Override
-    public CancelTasksResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<CancelTasksResponse> getResponseReader() {
         return CancelTasksResponse::new;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskAction.java
index 4e88963de4c..cdb5bbc3906 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskAction.java
@@ -19,12 +19,12 @@
 
 package org.elasticsearch.action.admin.cluster.node.tasks.get;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
 /**
  * Action for retrieving a list of currently running tasks
 */
-public class GetTaskAction extends Action<GetTaskResponse> {
+public class GetTaskAction extends StreamableResponseAction<GetTaskResponse> {
 
     public static final String TASKS_ORIGIN = "tasks";
     public static final GetTaskAction INSTANCE = new GetTaskAction();
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksAction.java
index abba798c83c..ffec4aed056 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksAction.java
@@ -34,11 +34,6 @@ public class ListTasksAction extends Action<ListTasksResponse> {
         super(NAME);
     }
 
-    @Override
-    public ListTasksResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<ListTasksResponse> getResponseReader() {
         return ListTasksResponse::new;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageAction.java
index 6bc6dce5494..34d864e1476 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.cluster.node.usage;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class NodesUsageAction extends Action<NodesUsageResponse> {
+public class NodesUsageAction extends StreamableResponseAction<NodesUsageResponse> {
 
     public static final NodesUsageAction INSTANCE = new NodesUsageAction();
     public static final String NAME = "cluster:monitor/nodes/usage";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoAction.java
index 3b998049daa..a3f2b2d406a 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.cluster.remote;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public final class RemoteInfoAction extends Action<RemoteInfoResponse> {
+public final class RemoteInfoAction extends StreamableResponseAction<RemoteInfoResponse> {
 
     public static final String NAME = "cluster:monitor/remote/info";
     public static final RemoteInfoAction INSTANCE = new RemoteInfoAction();
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java
index 205521a6b6f..f4b5cb2559f 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java
@@ -35,11 +35,6 @@ public class DeleteRepositoryAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesAction.java
index d89e466461d..f7567442320 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesAction.java
@@ -19,12 +19,12 @@
 
 package org.elasticsearch.action.admin.cluster.repositories.get;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
 /**
  * Get repositories action
 */
-public class GetRepositoriesAction extends Action<GetRepositoriesResponse> {
+public class GetRepositoriesAction extends StreamableResponseAction<GetRepositoriesResponse> {
 
     public static final GetRepositoriesAction INSTANCE = new GetRepositoriesAction();
     public static final String NAME = "cluster:admin/repository/get";
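Many of the admin actions in this stretch share `AcknowledgedResponse`, so they all hand out the same reader. A sketch of consuming one, assuming `in` is a `StreamInput` positioned at a serialized response:

    Writeable.Reader<AcknowledgedResponse> reader = DeleteRepositoryAction.INSTANCE.getResponseReader();
    AcknowledgedResponse ack = reader.read(in);
    if (ack.isAcknowledged() == false) {
        throw new IllegalStateException("delete repository was not acknowledged");
    }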
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryAction.java
index ea021f6ba67..1e070c5ed98 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryAction.java
@@ -35,11 +35,6 @@ public class PutRepositoryAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryAction.java
index 743b0a57f3a..67580e6bf81 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryAction.java
@@ -19,12 +19,12 @@
 
 package org.elasticsearch.action.admin.cluster.repositories.verify;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
 /**
  * Unregister repository action
 */
-public class VerifyRepositoryAction extends Action<VerifyRepositoryResponse> {
+public class VerifyRepositoryAction extends StreamableResponseAction<VerifyRepositoryResponse> {
 
     public static final VerifyRepositoryAction INSTANCE = new VerifyRepositoryAction();
     public static final String NAME = "cluster:admin/repository/verify";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java
index c341618729c..e92a136f838 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java
@@ -31,11 +31,6 @@ public class ClusterRerouteAction extends Action<ClusterRerouteResponse> {
         super(NAME);
     }
 
-    @Override
-    public ClusterRerouteResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<ClusterRerouteResponse> getResponseReader() {
         return ClusterRerouteResponse::new;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java
index c23a05338f2..9c359b919ee 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java
@@ -31,11 +31,6 @@ public class ClusterUpdateSettingsAction extends Action<ClusterUpdateSettingsResponse> {
         super(NAME);
     }
 
-    @Override
-    public ClusterUpdateSettingsResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<ClusterUpdateSettingsResponse> getResponseReader() {
         return ClusterUpdateSettingsResponse::new;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java
index 869aecf0954..cb323c6494d 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java
@@ -31,11 +31,6 @@ public class ClusterSearchShardsAction extends Action<ClusterSearchShardsResponse> {
         super(NAME);
     }
 
-    @Override
-    public ClusterSearchShardsResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<ClusterSearchShardsResponse> getResponseReader() {
         return ClusterSearchShardsResponse::new;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotAction.java
index d37132a1d81..988ac070fb7 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotAction.java
@@ -19,12 +19,12 @@
 
 package org.elasticsearch.action.admin.cluster.snapshots.create;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
 /**
  * Create snapshot action
 */
-public class CreateSnapshotAction extends Action<CreateSnapshotResponse> {
+public class CreateSnapshotAction extends StreamableResponseAction<CreateSnapshotResponse> {
 
     public static final CreateSnapshotAction INSTANCE = new CreateSnapshotAction();
     public static final String NAME = "cluster:admin/snapshot/create";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java
index 1413f0df068..8cf746e8460 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java
@@ -35,11 +35,6 @@ public class DeleteSnapshotAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsAction.java
index b5015ff5c23..4c371140788 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsAction.java
@@ -19,12 +19,12 @@
 
 package org.elasticsearch.action.admin.cluster.snapshots.get;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
 /**
  * Get snapshots action
 */
-public class GetSnapshotsAction extends Action<GetSnapshotsResponse> {
+public class GetSnapshotsAction extends StreamableResponseAction<GetSnapshotsResponse> {
 
     public static final GetSnapshotsAction INSTANCE = new GetSnapshotsAction();
     public static final String NAME = "cluster:admin/snapshot/get";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotAction.java
index e633ce43e66..8cf8d461f4e 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotAction.java
@@ -19,12 +19,12 @@
 
 package org.elasticsearch.action.admin.cluster.snapshots.restore;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
 /**
  * Restore snapshot action
 */
-public class RestoreSnapshotAction extends Action<RestoreSnapshotResponse> {
+public class RestoreSnapshotAction extends StreamableResponseAction<RestoreSnapshotResponse> {
 
     public static final RestoreSnapshotAction INSTANCE = new RestoreSnapshotAction();
     public static final String NAME = "cluster:admin/snapshot/restore";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusAction.java
index ea28d26b40f..09ca89f5d52 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusAction.java
@@ -19,12 +19,12 @@
 
 package org.elasticsearch.action.admin.cluster.snapshots.status;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
 /**
  * Snapshots status action
 */
-public class SnapshotsStatusAction extends Action<SnapshotsStatusResponse> {
+public class SnapshotsStatusAction extends StreamableResponseAction<SnapshotsStatusResponse> {
 
     public static final SnapshotsStatusAction INSTANCE = new SnapshotsStatusAction();
     public static final String NAME = "cluster:admin/snapshot/status";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateAction.java
index f48df06d53c..0087cf8d4f5 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.cluster.state;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class ClusterStateAction extends Action<ClusterStateResponse> {
+public class ClusterStateAction extends StreamableResponseAction<ClusterStateResponse> {
 
     public static final ClusterStateAction INSTANCE = new ClusterStateAction();
     public static final String NAME = "cluster:monitor/state";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsAction.java
index 049ce62d9df..2e1aa0d023d 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.cluster.stats;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class ClusterStatsAction extends Action<ClusterStatsResponse> {
+public class ClusterStatsAction extends StreamableResponseAction<ClusterStatsResponse> {
 
     public static final ClusterStatsAction INSTANCE = new ClusterStatsAction();
     public static final String NAME = "cluster:monitor/stats";
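What the reader buys is a one-shot deserialization path, which serialization tests can exercise as a round trip. A sketch in that style, assuming `original` is a populated `ClusterRerouteResponse` (one of the fully `Writeable` responses above):

    BytesStreamOutput out = new BytesStreamOutput();
    original.writeTo(out);
    try (StreamInput in = out.bytes().streamInput()) {
        ClusterRerouteResponse copy = ClusterRerouteAction.INSTANCE.getResponseReader().read(in);
        assert copy.isAcknowledged() == original.isAcknowledged();
    }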
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java
index 1e07090ea01..307440adfee 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java
@@ -32,11 +32,6 @@ public class DeleteStoredScriptAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptAction.java
index e8015a44874..6b4c229ebdd 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.cluster.storedscripts;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class GetStoredScriptAction extends Action<GetStoredScriptResponse> {
+public class GetStoredScriptAction extends StreamableResponseAction<GetStoredScriptResponse> {
 
     public static final GetStoredScriptAction INSTANCE = new GetStoredScriptAction();
     public static final String NAME = "cluster:admin/script/get";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java
index 7168667af0e..673299f275b 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java
@@ -33,11 +33,6 @@ public class PutStoredScriptAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java
index 296c65146a0..adfe1054038 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.cluster.tasks;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class PendingClusterTasksAction extends Action<PendingClusterTasksResponse> {
+public class PendingClusterTasksAction extends StreamableResponseAction<PendingClusterTasksResponse> {
 
     public static final PendingClusterTasksAction INSTANCE = new PendingClusterTasksAction();
     public static final String NAME = "cluster:monitor/task";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java
index 38b1844e73a..a0582ae704b 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java
@@ -32,11 +32,6 @@ public class IndicesAliasesAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistAction.java
index dfaebab076c..08f980e153d 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.indices.alias.exists;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class AliasesExistAction extends Action<AliasesExistResponse> {
+public class AliasesExistAction extends StreamableResponseAction<AliasesExistResponse> {
 
     public static final AliasesExistAction INSTANCE = new AliasesExistAction();
     public static final String NAME = "indices:admin/aliases/exists";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java
index db423c2aaaa..9f88d9a16c1 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.indices.alias.get;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class GetAliasesAction extends Action<GetAliasesResponse> {
+public class GetAliasesAction extends StreamableResponseAction<GetAliasesResponse> {
 
     public static final GetAliasesAction INSTANCE = new GetAliasesAction();
     public static final String NAME = "indices:admin/aliases/get";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java
index 27b623776fa..cbd24a1fa88 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java
@@ -59,11 +59,6 @@ public class AnalyzeAction extends Action<AnalyzeAction.Response> {
         return Response::new;
     }
 
-    @Override
-    public Response newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     /**
      * A request to analyze a text associated with a specific index. Allow to provide
      * the actual analyzer name to perform the analysis with.
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java
index e5bdd53bdc7..3dd135d9655 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.indices.cache.clear;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class ClearIndicesCacheAction extends Action<ClearIndicesCacheResponse> {
+public class ClearIndicesCacheAction extends StreamableResponseAction<ClearIndicesCacheResponse> {
 
     public static final ClearIndicesCacheAction INSTANCE = new ClearIndicesCacheAction();
     public static final String NAME = "indices:admin/cache/clear";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexAction.java
index 2d87b75273c..d4c3bc4503d 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexAction.java
@@ -31,11 +31,6 @@ public class CloseIndexAction extends Action<CloseIndexResponse> {
         super(NAME);
     }
 
-    @Override
-    public CloseIndexResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<CloseIndexResponse> getResponseReader() {
         return CloseIndexResponse::new;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexAction.java
index fd55ec4bf0b..620e1cc9f4a 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexAction.java
@@ -31,11 +31,6 @@ public class CreateIndexAction extends Action<CreateIndexResponse> {
         super(NAME);
     }
 
-    @Override
-    public CreateIndexResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<CreateIndexResponse> getResponseReader() {
         return CreateIndexResponse::new;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexAction.java
index 9bb7b2d880a..0e45a77e728 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexAction.java
@@ -32,11 +32,6 @@ public class DeleteIndexAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsAction.java
index b878994549f..629eda3a240 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.indices.exists.indices;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class IndicesExistsAction extends Action<IndicesExistsResponse> {
+public class IndicesExistsAction extends StreamableResponseAction<IndicesExistsResponse> {
 
     public static final IndicesExistsAction INSTANCE = new IndicesExistsAction();
     public static final String NAME = "indices:admin/exists";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsAction.java
index 0b508110d7f..80b7ed0c0d8 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsAction.java
@@ -18,9 +18,9 @@
  */
 package org.elasticsearch.action.admin.indices.exists.types;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class TypesExistsAction extends Action<TypesExistsResponse> {
+public class TypesExistsAction extends StreamableResponseAction<TypesExistsResponse> {
 
     public static final TypesExistsAction INSTANCE = new TypesExistsAction();
     public static final String NAME = "indices:admin/types/exists";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java
index 60d5b43a6c1..721eae72ee0 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.indices.flush;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class FlushAction extends Action<FlushResponse> {
+public class FlushAction extends StreamableResponseAction<FlushResponse> {
 
     public static final FlushAction INSTANCE = new FlushAction();
     public static final String NAME = "indices:admin/flush";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java
index 5005cd2ec08..b3af28dd239 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java
@@ -19,10 +19,10 @@
 
 package org.elasticsearch.action.admin.indices.flush;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class SyncedFlushAction extends Action<SyncedFlushResponse> {
+public class SyncedFlushAction extends StreamableResponseAction<SyncedFlushResponse> {
 
     public static final SyncedFlushAction INSTANCE = new SyncedFlushAction();
     public static final String NAME = "indices:admin/synced_flush";
 
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeAction.java
index 51095435343..6b9866afa7a 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.indices.forcemerge;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class ForceMergeAction extends Action<ForceMergeResponse> {
+public class ForceMergeAction extends StreamableResponseAction<ForceMergeResponse> {
 
     public static final ForceMergeAction INSTANCE = new ForceMergeAction();
     public static final String NAME = "indices:admin/forcemerge";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexAction.java
index 86396f246a4..e2c663cb7da 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.indices.get;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class GetIndexAction extends Action<GetIndexResponse> {
+public class GetIndexAction extends StreamableResponseAction<GetIndexResponse> {
 
     public static final GetIndexAction INSTANCE = new GetIndexAction();
     public static final String NAME = "indices:admin/get";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java
index d372d8cf93f..97cb7b3943d 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java
@@ -31,11 +31,6 @@ public class GetFieldMappingsAction extends Action<GetFieldMappingsResponse> {
         super(NAME);
     }
 
-    @Override
-    public GetFieldMappingsResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<GetFieldMappingsResponse> getResponseReader() {
         return GetFieldMappingsResponse::new;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsAction.java
index 8bae685fff5..6b64cdec306 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.indices.mapping.get;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class GetMappingsAction extends Action<GetMappingsResponse> {
+public class GetMappingsAction extends StreamableResponseAction<GetMappingsResponse> {
 
     public static final GetMappingsAction INSTANCE = new GetMappingsAction();
     public static final String NAME = "indices:admin/mappings/get";
Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return AcknowledgedResponse::new; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexAction.java index 6d53a3a72d3..95cb4d4a78e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexAction.java @@ -31,11 +31,6 @@ public class OpenIndexAction extends Action { super(NAME); } - @Override - public OpenIndexResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return OpenIndexResponse::new; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryAction.java index bfe261b5884..cea3db52495 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.recovery; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Recovery information action */ -public class RecoveryAction extends Action { +public class RecoveryAction extends StreamableResponseAction { public static final RecoveryAction INSTANCE = new RecoveryAction(); public static final String NAME = "indices:monitor/recovery"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java index b0dac076b2f..63815fe2dcf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.admin.indices.refresh; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class RefreshAction extends Action { +public class RefreshAction extends StreamableResponseAction { public static final RefreshAction INSTANCE = new RefreshAction(); public static final String NAME = "indices:admin/refresh"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverAction.java index 091dc1e09e1..e7cc53dbf44 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverAction.java @@ -31,11 +31,6 @@ public class RolloverAction extends Action { super(NAME); } - @Override - public RolloverResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return RolloverResponse::new; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsAction.java index 669c31d6b08..b236bf0cc22 100644 --- 
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsAction.java
index 669c31d6b08..b236bf0cc22 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.indices.segments;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class IndicesSegmentsAction extends Action<IndicesSegmentResponse> {
+public class IndicesSegmentsAction extends StreamableResponseAction<IndicesSegmentResponse> {
 
     public static final IndicesSegmentsAction INSTANCE = new IndicesSegmentsAction();
     public static final String NAME = "indices:monitor/segments";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsAction.java
index e4149aaf8f1..4aa4fe57814 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.indices.settings.get;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class GetSettingsAction extends Action<GetSettingsResponse> {
+public class GetSettingsAction extends StreamableResponseAction<GetSettingsResponse> {
 
     public static final GetSettingsAction INSTANCE = new GetSettingsAction();
     public static final String NAME = "indices:monitor/settings/get";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java
index acf07668ad3..af7750cff79 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java
@@ -32,11 +32,6 @@ public class UpdateSettingsAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresAction.java
index d3ce0077d5e..3016633caeb 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresAction.java
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.action.admin.indices.shards;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
 /**
  * Action for {@link TransportIndicesShardStoresAction}
@@ -28,7 +28,7 @@ import org.elasticsearch.action.Action;
  * Shard store information reports which nodes hold shard copies, how recent they are
  * and any exceptions on opening the shard index or from previous engine failures
 */
-public class IndicesShardStoresAction extends Action<IndicesShardStoresResponse> {
+public class IndicesShardStoresAction extends StreamableResponseAction<IndicesShardStoresResponse> {
 
     public static final IndicesShardStoresAction INSTANCE = new IndicesShardStoresAction();
     public static final String NAME = "indices:monitor/shard_stores";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeAction.java
index 728bf89527a..ec86e7432d9 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeAction.java
@@ -33,11 +33,6 @@ public class ResizeAction extends Action<ResizeResponse> {
         super(NAME);
     }
 
-    @Override
-    public ResizeResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<ResizeResponse> getResponseReader() {
         return ResizeResponse::new;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java
index 3aef52f1636..7c8c58e4864 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java
@@ -31,11 +31,6 @@ public class ShrinkAction extends Action<ResizeResponse> {
         super(NAME);
     }
 
-    @Override
-    public ResizeResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<ResizeResponse> getResponseReader() {
         return ResizeResponse::new;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsAction.java
index 6765279eb6b..5b91f3f31e5 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsAction.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.indices.stats;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 
-public class IndicesStatsAction extends Action<IndicesStatsResponse> {
+public class IndicesStatsAction extends StreamableResponseAction<IndicesStatsResponse> {
 
     public static final IndicesStatsAction INSTANCE = new IndicesStatsAction();
     public static final String NAME = "indices:monitor/stats";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java
index 2f3dd877f8a..c4a5bdc4381 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java
@@ -32,11 +32,6 @@ public class DeleteIndexTemplateAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesAction.java @@ -18,9 +18,9 @@ */ package org.elasticsearch.action.admin.indices.template.get; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class GetIndexTemplatesAction extends Action { +public class GetIndexTemplatesAction extends StreamableResponseAction { public static final GetIndexTemplatesAction INSTANCE = new GetIndexTemplatesAction(); public static final String NAME = "indices:admin/template/get"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java index 9af753bbfdc..88c3b9bb09d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java @@ -32,11 +32,6 @@ public class PutIndexTemplateAction extends Action { super(NAME); } - @Override - public AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return AcknowledgedResponse::new; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusAction.java index 57506b615d6..6545008486a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.admin.indices.upgrade.get; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class UpgradeStatusAction extends Action { +public class UpgradeStatusAction extends StreamableResponseAction { public static final UpgradeStatusAction INSTANCE = new UpgradeStatusAction(); public static final String NAME = "indices:monitor/upgrade"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeAction.java index 7ec83930e44..ea5f511741a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.upgrade.post; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Upgrade index/indices action. 
*/ -public class UpgradeAction extends Action { +public class UpgradeAction extends StreamableResponseAction { public static final UpgradeAction INSTANCE = new UpgradeAction(); public static final String NAME = "indices:admin/upgrade"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java index f830ebff3dd..324e796d214 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java @@ -32,11 +32,6 @@ public class UpgradeSettingsAction extends Action { super(NAME); } - @Override - public AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return AcknowledgedResponse::new; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryAction.java index 93151dd8a2b..2b0b21e28bc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.admin.indices.validate.query; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class ValidateQueryAction extends Action { +public class ValidateQueryAction extends StreamableResponseAction { public static final ValidateQueryAction INSTANCE = new ValidateQueryAction(); public static final String NAME = "indices:admin/validate/query"; diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkAction.java index f835b57a38b..ee5f32e1cfa 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkAction.java @@ -19,11 +19,11 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.transport.TransportRequestOptions; -public class BulkAction extends Action { +public class BulkAction extends StreamableResponseAction { public static final BulkAction INSTANCE = new BulkAction(); public static final String NAME = "indices:data/write/bulk"; diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteAction.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteAction.java index d78b6f60bff..1d43fb102a6 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteAction.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.delete; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class DeleteAction extends Action { +public class DeleteAction extends StreamableResponseAction { public static final DeleteAction INSTANCE = new DeleteAction(); public static final String NAME = "indices:data/write/delete"; diff --git 
a/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java index ba5618ce7de..2546de35e54 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java @@ -34,11 +34,6 @@ public class ExplainAction extends Action { super(NAME); } - @Override - public ExplainResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return ExplainResponse::new; diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesAction.java index 39c6ecce308..0cec94839c2 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class FieldCapabilitiesAction extends Action { +public class FieldCapabilitiesAction extends StreamableResponseAction { public static final FieldCapabilitiesAction INSTANCE = new FieldCapabilitiesAction(); public static final String NAME = "indices:data/read/field_caps"; diff --git a/server/src/main/java/org/elasticsearch/action/get/GetAction.java b/server/src/main/java/org/elasticsearch/action/get/GetAction.java index 05d1b6c5a4c..383029090c5 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetAction.java @@ -31,11 +31,6 @@ public class GetAction extends Action { super(NAME); } - @Override - public GetResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return GetResponse::new; diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetAction.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetAction.java index 9b69e33239b..cfb90e1ccee 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.get; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class MultiGetAction extends Action { +public class MultiGetAction extends StreamableResponseAction { public static final MultiGetAction INSTANCE = new MultiGetAction(); public static final String NAME = "indices:data/read/mget"; diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexAction.java b/server/src/main/java/org/elasticsearch/action/index/IndexAction.java index 4f3e6068a2a..6515f75f083 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.index; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class IndexAction extends Action { +public class IndexAction extends StreamableResponseAction { public static final IndexAction INSTANCE = new IndexAction(); public 
static final String NAME = "indices:data/write/index"; diff --git a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineAction.java b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineAction.java index c2842340ba4..11197491554 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineAction.java @@ -32,11 +32,6 @@ public class DeletePipelineAction extends Action { super(NAME); } - @Override - public AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return AcknowledgedResponse::new; diff --git a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineAction.java b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineAction.java index b2305227ac6..ac699ea8c32 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.ingest; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class GetPipelineAction extends Action { +public class GetPipelineAction extends StreamableResponseAction { public static final GetPipelineAction INSTANCE = new GetPipelineAction(); public static final String NAME = "cluster:admin/ingest/pipeline/get"; diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineAction.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineAction.java index b614c847b4f..06038141e90 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineAction.java @@ -32,11 +32,6 @@ public class PutPipelineAction extends Action { super(NAME); } - @Override - public AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return AcknowledgedResponse::new; diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineAction.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineAction.java index afeb4e01fb0..6d05c9fb690 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.ingest; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class SimulatePipelineAction extends Action { +public class SimulatePipelineAction extends StreamableResponseAction { public static final SimulatePipelineAction INSTANCE = new SimulatePipelineAction(); public static final String NAME = "cluster:admin/ingest/pipeline/simulate"; diff --git a/server/src/main/java/org/elasticsearch/action/main/MainAction.java b/server/src/main/java/org/elasticsearch/action/main/MainAction.java index 831ddd0983f..9d38e913f77 100644 --- a/server/src/main/java/org/elasticsearch/action/main/MainAction.java +++ b/server/src/main/java/org/elasticsearch/action/main/MainAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.main; -import org.elasticsearch.action.Action; +import 
org.elasticsearch.action.StreamableResponseAction; -public class MainAction extends Action { +public class MainAction extends StreamableResponseAction { public static final String NAME = "cluster:monitor/main"; public static final MainAction INSTANCE = new MainAction(); diff --git a/server/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java index 660ed1ee178..101d002d698 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.search; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class ClearScrollAction extends Action { +public class ClearScrollAction extends StreamableResponseAction { public static final ClearScrollAction INSTANCE = new ClearScrollAction(); public static final String NAME = "indices:data/read/scroll/clear"; diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java index 9017a7b94ec..fe1efc1e2d0 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java @@ -31,11 +31,6 @@ public class MultiSearchAction extends Action { super(NAME); } - @Override - public MultiSearchResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return MultiSearchResponse::new; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchAction.java index d665595e8d3..dfa8d66098c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchAction.java @@ -31,11 +31,6 @@ public class SearchAction extends Action { super(NAME); } - @Override - public SearchResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return SearchResponse::new; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java index 0b4adfc1ba5..6c5cf0e46f0 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java @@ -31,11 +31,6 @@ public class SearchScrollAction extends Action { super(NAME); } - @Override - public SearchResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return SearchResponse::new; diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsAction.java index a894b3480f1..e3fa0d17cf1 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsAction.java @@ -19,9 +19,9 @@ package 
org.elasticsearch.action.termvectors; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class MultiTermVectorsAction extends Action { +public class MultiTermVectorsAction extends StreamableResponseAction { public static final MultiTermVectorsAction INSTANCE = new MultiTermVectorsAction(); public static final String NAME = "indices:data/read/mtv"; diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java index 9b223eed3a3..12c11bff832 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java @@ -31,11 +31,6 @@ public class TermVectorsAction extends Action { super(NAME); } - @Override - public TermVectorsResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return TermVectorsResponse::new; diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateAction.java b/server/src/main/java/org/elasticsearch/action/update/UpdateAction.java index 1c8c80b61ce..5835f7c88db 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateAction.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.update; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class UpdateAction extends Action { +public class UpdateAction extends StreamableResponseAction { public static final UpdateAction INSTANCE = new UpdateAction(); public static final String NAME = "indices:data/write/update"; diff --git a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryAction.java b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryAction.java index c7cfe28e2c0..97c4708ec1e 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryAction.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class DeleteByQueryAction extends Action { +public class DeleteByQueryAction extends StreamableResponseAction { public static final DeleteByQueryAction INSTANCE = new DeleteByQueryAction(); public static final String NAME = "indices:data/write/delete/byquery"; diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ReindexAction.java b/server/src/main/java/org/elasticsearch/index/reindex/ReindexAction.java index 86d0c96602a..945b080ca23 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/ReindexAction.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/ReindexAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class ReindexAction extends Action { +public class ReindexAction extends StreamableResponseAction { public static final ReindexAction INSTANCE = new ReindexAction(); public static final String NAME = "indices:data/write/reindex"; diff --git a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryAction.java 
b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryAction.java index 250a267ea25..d758a9c4113 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryAction.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class UpdateByQueryAction extends Action { +public class UpdateByQueryAction extends StreamableResponseAction { public static final UpdateByQueryAction INSTANCE = new UpdateByQueryAction(); public static final String NAME = "indices:data/write/update/byquery"; diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java index 3f292a7c8d1..a8805c1c5cd 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java @@ -19,10 +19,10 @@ package org.elasticsearch.index.seqno; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.single.shard.SingleShardRequest; import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; @@ -123,7 +123,7 @@ public class RetentionLeaseActions { } - public static class Add extends Action { + public static class Add extends StreamableResponseAction { public static final Add INSTANCE = new Add(); public static final String ACTION_NAME = "indices:admin/seq_no/add_retention_lease"; @@ -176,7 +176,7 @@ public class RetentionLeaseActions { } - public static class Renew extends Action { + public static class Renew extends StreamableResponseAction { public static final Renew INSTANCE = new Renew(); public static final String ACTION_NAME = "indices:admin/seq_no/renew_retention_lease"; @@ -222,7 +222,7 @@ public class RetentionLeaseActions { } - public static class Remove extends Action { + public static class Remove extends StreamableResponseAction { public static final Remove INSTANCE = new Remove(); public static final String ACTION_NAME = "indices:admin/seq_no/remove_retention_lease"; diff --git a/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java b/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java index b09c33b59d2..f376388aeb7 100644 --- a/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java @@ -18,9 +18,9 @@ */ package org.elasticsearch.persistent; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.action.support.master.MasterNodeRequest; @@ -46,7 +46,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; * Action that is used by executor node to 
indicate that the persistent action finished or failed on the node and needs to be * removed from the cluster state in case of successful completion or restarted on some other node in case of failure. */ -public class CompletionPersistentTaskAction extends Action { +public class CompletionPersistentTaskAction extends StreamableResponseAction { public static final CompletionPersistentTaskAction INSTANCE = new CompletionPersistentTaskAction(); public static final String NAME = "cluster:admin/persistent/completion"; diff --git a/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java b/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java index 877033fe4f3..880b6862969 100644 --- a/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java @@ -18,9 +18,9 @@ */ package org.elasticsearch.persistent; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.action.support.master.MasterNodeRequest; @@ -40,7 +40,7 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.Objects; -public class RemovePersistentTaskAction extends Action { +public class RemovePersistentTaskAction extends StreamableResponseAction { public static final RemovePersistentTaskAction INSTANCE = new RemovePersistentTaskAction(); public static final String NAME = "cluster:admin/persistent/remove"; diff --git a/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java b/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java index 76e0c526eb6..d6e52551ac5 100644 --- a/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.persistent; import org.elasticsearch.Version; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.action.support.master.MasterNodeRequest; @@ -47,7 +47,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; /** * This action can be used to add the record for the persistent action to the cluster state. 
*/ -public class StartPersistentTaskAction extends Action { +public class StartPersistentTaskAction extends StreamableResponseAction { public static final StartPersistentTaskAction INSTANCE = new StartPersistentTaskAction(); public static final String NAME = "cluster:admin/persistent/start"; diff --git a/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java b/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java index 218154d37c9..089e92fe6a4 100644 --- a/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java @@ -18,9 +18,9 @@ */ package org.elasticsearch.persistent; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.action.support.master.MasterNodeRequest; @@ -42,7 +42,7 @@ import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; -public class UpdatePersistentTaskStatusAction extends Action { +public class UpdatePersistentTaskStatusAction extends StreamableResponseAction { public static final UpdatePersistentTaskStatusAction INSTANCE = new UpdatePersistentTaskStatusAction(); public static final String NAME = "cluster:admin/persistent/update_status"; diff --git a/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java b/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java index 1bde97d0b8e..3ecb58a9173 100644 --- a/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java +++ b/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java @@ -88,7 +88,7 @@ public class ActionModuleTests extends ESTestCase { protected void doExecute(Task task, FakeRequest request, ActionListener listener) { } } - class FakeAction extends Action { + class FakeAction extends StreamableResponseAction { protected FakeAction() { super("fake"); } diff --git a/server/src/test/java/org/elasticsearch/action/ActionTests.java b/server/src/test/java/org/elasticsearch/action/ActionTests.java index a7dca3f098d..46a7f97653e 100644 --- a/server/src/test/java/org/elasticsearch/action/ActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/ActionTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.test.ESTestCase; public class ActionTests extends ESTestCase { public void testEquals() { - class FakeAction extends Action { + class FakeAction extends StreamableResponseAction { protected FakeAction(String name) { super(name); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java index 05b19befa7c..61463e64042 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.FailedNodeException; +import 
org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.BaseNodeRequest; @@ -336,7 +337,7 @@ public class TestTaskPlugin extends Plugin implements ActionPlugin, NetworkPlugi } - public static class TestTaskAction extends Action { + public static class TestTaskAction extends StreamableResponseAction { public static final TestTaskAction INSTANCE = new TestTaskAction(); public static final String NAME = "cluster:admin/tasks/test"; @@ -470,11 +471,6 @@ public class TestTaskPlugin extends Plugin implements ActionPlugin, NetworkPlugi super(NAME); } - @Override - public UnblockTestTasksResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return UnblockTestTasksResponse::new; diff --git a/server/src/test/java/org/elasticsearch/indices/settings/InternalOrPrivateSettingsPlugin.java b/server/src/test/java/org/elasticsearch/indices/settings/InternalOrPrivateSettingsPlugin.java index 54750933ecd..d8fdfa5582c 100644 --- a/server/src/test/java/org/elasticsearch/indices/settings/InternalOrPrivateSettingsPlugin.java +++ b/server/src/test/java/org/elasticsearch/indices/settings/InternalOrPrivateSettingsPlugin.java @@ -19,11 +19,11 @@ package org.elasticsearch.indices.settings; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -62,7 +62,7 @@ public class InternalOrPrivateSettingsPlugin extends Plugin implements ActionPlu return Arrays.asList(INDEX_INTERNAL_SETTING, INDEX_PRIVATE_SETTING); } - public static class UpdateInternalOrPrivateAction extends Action { + public static class UpdateInternalOrPrivateAction extends StreamableResponseAction { public static final UpdateInternalOrPrivateAction INSTANCE = new UpdateInternalOrPrivateAction(); private static final String NAME = "indices:admin/settings/update-internal-or-private-index"; diff --git a/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java b/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java index 151129c0cc1..803cff5a151 100644 --- a/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java +++ b/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java @@ -405,11 +405,6 @@ public class TestPersistentTasksPlugin extends Plugin implements ActionPlugin, P super(NAME); } - @Override - public TestTasksResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return TestTasksResponse::new; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java index 03495d34f29..c9499e67a89 100644 --- 
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -61,11 +61,6 @@ public class ShardChangesAction extends Action { super(NAME); } - @Override - public Response newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return Response::new; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsAction.java index a85e5c50e84..98f3e918503 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsAction.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.xpack.ccr.action.bulk; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class BulkShardOperationsAction extends Action { +public class BulkShardOperationsAction extends StreamableResponseAction { public static final BulkShardOperationsAction INSTANCE = new BulkShardOperationsAction(); public static final String NAME = "indices:data/write/bulk_shard_operations[s]"; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/ClearCcrRestoreSessionAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/ClearCcrRestoreSessionAction.java index c4651e877fa..5bdc1df7553 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/ClearCcrRestoreSessionAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/ClearCcrRestoreSessionAction.java @@ -29,11 +29,6 @@ public class ClearCcrRestoreSessionAction extends Action { super(NAME); } - @Override - public ClearCcrRestoreSessionResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return ClearCcrRestoreSessionResponse::new; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/DeleteInternalCcrRepositoryAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/DeleteInternalCcrRepositoryAction.java index 93d432fe93f..b6f01481185 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/DeleteInternalCcrRepositoryAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/DeleteInternalCcrRepositoryAction.java @@ -29,11 +29,6 @@ public class DeleteInternalCcrRepositoryAction extends Action { super(NAME); } - @Override - public DeleteInternalCcrRepositoryResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return DeleteInternalCcrRepositoryResponse::new; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java index 37dfc84f46a..64ac8c21881 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java @@ -36,11 +36,6 @@ public class GetCcrRestoreFileChunkAction extends Action { super(NAME); } - @Override - public GetCcrRestoreFileChunkResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return GetCcrRestoreFileChunkResponse::new;
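
Every hunk in this patch makes the same trade: the Streamable-era newResponse() factory (construct an empty object, then fill it from the stream) gives way to getResponseReader(), which returns a Writeable.Reader, in practice a constructor reference such as Response::new. A minimal, self-contained sketch of why a constructor reference satisfies that contract; the Reader interface, SketchResponse class, and DataInput plumbing below are simplified stand-ins, not the real org.elasticsearch types:

    import java.io.ByteArrayInputStream;
    import java.io.DataInput;
    import java.io.DataInputStream;
    import java.io.IOException;

    // Stand-in for Writeable.Reader: a factory that materialises a
    // fully-initialised object straight from a stream.
    @FunctionalInterface
    interface Reader<T> {
        T read(DataInput in) throws IOException;
    }

    // A response whose fields are final and assigned in the stream
    // constructor; that constructor is what "return Response::new" hands back.
    final class SketchResponse {
        final boolean acknowledged;

        SketchResponse(DataInput in) throws IOException {
            this.acknowledged = in.readBoolean();
        }
    }

    public class ReaderSketch {
        public static void main(String[] args) throws IOException {
            Reader<SketchResponse> reader = SketchResponse::new;
            DataInput in = new DataInputStream(new ByteArrayInputStream(new byte[] { 1 }));
            System.out.println(reader.read(in).acknowledged); // prints: true
        }
    }

The final field is the point: a response built this way is immutable and fully initialised the moment it leaves the reader, which is what lets the throwing newResponse() stubs above be deleted outright.
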
diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java index 91ec057ac4e..ed2d811b3b5 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java @@ -39,11 +39,6 @@ public class PutCcrRestoreSessionAction extends Action { super(NAME); } - @Override - public PutCcrRestoreSessionResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return PutCcrRestoreSessionAction.PutCcrRestoreSessionResponse::new; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutInternalCcrRepositoryAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutInternalCcrRepositoryAction.java index 397137ffb49..40ba9ab3421 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutInternalCcrRepositoryAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutInternalCcrRepositoryAction.java @@ -29,11 +29,6 @@ public class PutInternalCcrRepositoryAction extends Action { super(NAME); } - @Override - public PutInternalCcrRepositoryResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return PutInternalCcrRepositoryResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseAction.java index 261f0318c98..18430399572 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseAction.java @@ -18,11 +18,6 @@ public class DeleteLicenseAction extends Action { super(NAME); } - @Override - public AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return AcknowledgedResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusAction.java index be97ff59172..3b7b6d45c1c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusAction.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.license; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class GetBasicStatusAction extends Action { +public class GetBasicStatusAction extends StreamableResponseAction { public static final GetBasicStatusAction INSTANCE = new GetBasicStatusAction(); public static final String NAME = "cluster:admin/xpack/license/basic_status"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseAction.java index a6f19ea95b1..5db3c9cb335 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseAction.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.license; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class GetLicenseAction extends Action { +public class GetLicenseAction extends 
StreamableResponseAction { public static final GetLicenseAction INSTANCE = new GetLicenseAction(); public static final String NAME = "cluster:monitor/xpack/license/get"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusAction.java index 69c14e1b6dc..2b47f25e5ba 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusAction.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.license; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class GetTrialStatusAction extends Action { +public class GetTrialStatusAction extends StreamableResponseAction { public static final GetTrialStatusAction INSTANCE = new GetTrialStatusAction(); public static final String NAME = "cluster:admin/xpack/license/trial_status"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicAction.java index 446ff45501b..4b8ac56df96 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicAction.java @@ -17,11 +17,6 @@ public class PostStartBasicAction extends Action { super(NAME); } - @Override - public PostStartBasicResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return PostStartBasicResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialAction.java index 609fa42caab..385c2cb0975 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialAction.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.license; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class PostStartTrialAction extends Action { +public class PostStartTrialAction extends StreamableResponseAction { public static final PostStartTrialAction INSTANCE = new PostStartTrialAction(); public static final String NAME = "cluster:admin/xpack/license/start_trial"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseAction.java index 263b0a4c556..4106058e9f3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseAction.java @@ -18,11 +18,6 @@ public class PutLicenseAction extends Action { super(NAME); } - @Override - public PutLicenseResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return PutLicenseResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzerAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzerAction.java index f37df1ec820..e5b1bde1ef1 100644 
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzerAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/ReloadAnalyzerAction.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.xpack.core.action; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class ReloadAnalyzerAction extends Action { +public class ReloadAnalyzerAction extends StreamableResponseAction { public static final ReloadAnalyzerAction INSTANCE = new ReloadAnalyzerAction(); public static final String NAME = "indices:admin/reload_analyzers"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportFreezeIndexAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportFreezeIndexAction.java index e42c599a051..25d3f4d1c03 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportFreezeIndexAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportFreezeIndexAction.java @@ -241,11 +241,6 @@ public final class TransportFreezeIndexAction extends TransportMasterNodeAction { super(NAME); } - @Override - public FreezeResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return FreezeResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoAction.java index a47ce7a41c6..935e773ee16 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoAction.java @@ -5,10 +5,10 @@ */ package org.elasticsearch.xpack.core.action; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.protocol.xpack.XPackInfoResponse; -public class XPackInfoAction extends Action { +public class XPackInfoAction extends StreamableResponseAction { public static final String NAME = "cluster:monitor/xpack/info"; public static final XPackInfoAction INSTANCE = new XPackInfoAction(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageAction.java index 40311a4e884..68c7d05fbe3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageAction.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.xpack.core.action; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class XPackUsageAction extends Action { +public class XPackUsageAction extends StreamableResponseAction { public static final String NAME = "cluster:monitor/xpack/usage"; public static final XPackUsageAction INSTANCE = new XPackUsageAction();
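
The class-level switches in these hunks (extends Action becoming extends StreamableResponseAction) follow from a split of the base class along the same Streamable/Writeable line. Roughly, and again with simplified stand-ins (ActionSketch and StreamableResponseActionSketch are invented names reusing the Reader stand-in from the earlier sketch; the real classes live in org.elasticsearch.action and carry more machinery):

    // Stand-in for the legacy Streamable contract: mutate-in-place reads.
    interface StreamableSketch {
        void readFrom(java.io.DataInput in) throws java.io.IOException;
    }

    // Writeable-first base class: subclasses with stream constructors
    // override getResponseReader() directly and never see newResponse().
    abstract class ActionSketch<Response> {
        private final String name;

        protected ActionSketch(String name) {
            this.name = name;
        }

        public final String name() {
            return name;
        }

        public abstract Reader<Response> getResponseReader();
    }

    // Bridge for actions still on Streamable responses: they keep supplying
    // an empty instance, and the reader is derived from it automatically.
    abstract class StreamableResponseActionSketch<Response extends StreamableSketch> extends ActionSketch<Response> {
        protected StreamableResponseActionSketch(String name) {
            super(name);
        }

        public abstract Response newResponse();

        @Override
        public final Reader<Response> getResponseReader() {
            return in -> {
                Response response = newResponse();
                response.readFrom(in); // legacy two-step deserialisation
                return response;
            };
        }
    }

Making the bridge's getResponseReader() final in this sketch keeps the two deserialisation paths mutually exclusive: a subclass either supplies a reader or an empty instance, never both.
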
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java index 5b2033443dc..494cf3ec42a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java @@ -29,11 +29,6 @@ public class CcrStatsAction extends Action { super(NAME); } - @Override - public Response newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return Response::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternAction.java index e23e24d1664..3be27c7ff3f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternAction.java @@ -27,11 +27,6 @@ public class DeleteAutoFollowPatternAction extends Action { super(NAME); } - @Override - public AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return AcknowledgedResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java index 122ae2b0a17..2bf96e67029 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java @@ -32,11 +32,6 @@ public class FollowInfoAction extends Action { super(NAME); } - @Override - public Response newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return Response::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowStatsAction.java index 72353f405cf..a1be7b83325 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowStatsAction.java @@ -39,11 +39,6 @@ public class FollowStatsAction extends Action { super(NAME); } - @Override - public StatsResponses newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return StatsResponses::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ForgetFollowerAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ForgetFollowerAction.java index d2a0b565496..5ad9b60427f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ForgetFollowerAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ForgetFollowerAction.java @@ -6,8 +6,8 @@ package org.elasticsearch.xpack.core.ccr.action; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.action.support.broadcast.BroadcastRequest; import 
org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.ParseField; @@ -19,7 +19,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; -public class ForgetFollowerAction extends Action { +public class ForgetFollowerAction extends StreamableResponseAction { public static final String NAME = "indices:admin/xpack/ccr/forget_follower"; public static final ForgetFollowerAction INSTANCE = new ForgetFollowerAction(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java index cd37692da43..81ce4093f63 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java @@ -30,11 +30,6 @@ public class GetAutoFollowPatternAction extends Action { super(NAME); } - @Override - public Response newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return Response::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PauseFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PauseFollowAction.java index 748eb291f85..690d37af0ad 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PauseFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PauseFollowAction.java @@ -26,11 +26,6 @@ public class PauseFollowAction extends Action { super(NAME); } - @Override - public AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return AcknowledgedResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java index 408eb1c38bf..443bce98218 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java @@ -38,11 +38,6 @@ public class PutAutoFollowPatternAction extends Action { super(NAME); } - @Override - public AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return AcknowledgedResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java index 89c18a9824a..ae302aea011 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java @@ -37,11 +37,6 @@ public final class PutFollowAction extends Action { super(NAME); } - @Override - public Response newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return Response::new;
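
On the consuming side, the benefit shows up wherever a transport response is deserialised. A hedged sketch of that call site, reusing the stand-ins above (readResponse is a hypothetical helper, not an Elasticsearch method):

    import java.io.DataInput;
    import java.io.IOException;

    public class TransportSketch {
        // With every action exposing a reader, response handling collapses
        // to one generic call site: no mutable empty objects, no casts.
        static <Response> Response readResponse(ActionSketch<Response> action, DataInput in) throws IOException {
            return action.getResponseReader().read(in);
        }
    }
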
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ResumeFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ResumeFollowAction.java index f22d4de0c8b..fecbbe31d2b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ResumeFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ResumeFollowAction.java @@ -32,11 +32,6 @@ public final class ResumeFollowAction extends Action { super(NAME); } - @Override - public AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return AcknowledgedResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/UnfollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/UnfollowAction.java index e7804eb21be..832c6fcf1b2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/UnfollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/UnfollowAction.java @@ -29,11 +29,6 @@ public class UnfollowAction extends Action { super(NAME); } - @Override - public AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return AcknowledgedResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java index 20f16a6a21f..cedd59e79e6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java @@ -27,11 +27,6 @@ public class DeleteDataFrameTransformAction extends Action { super(NAME); } - @Override - public AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return AcknowledgedResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsAction.java index d8bcf730012..47ef20ffd50 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsAction.java @@ -40,11 +40,6 @@ public class GetDataFrameTransformsAction extends Action { super(NAME); } - @Override - public Response newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return Response::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java index d6e46892ad3..64fbd782fea 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java +++ 
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java
@@ -43,11 +43,6 @@ public class GetDataFrameTransformsStatsAction extends Action<GetDataFrameTransformsStatsAction.Response> {
         super(NAME);
     }
 
-    @Override
-    public Response newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<Response> getResponseReader() {
         return Response::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformAction.java
index 6108136a87b..a01d0a50ac2 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformAction.java
@@ -44,11 +44,6 @@ public class PreviewDataFrameTransformAction extends Action<PreviewDataFrameTransformAction.Response> {
         super(NAME);
     }
 
-    @Override
-    public Response newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<Response> getResponseReader() {
         return Response::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java
index ff9af3ab85f..a368ae04340 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java
@@ -38,11 +38,6 @@ public class PutDataFrameTransformAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformAction.java
index e2128a0f718..e091e6c346d 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformAction.java
@@ -31,11 +31,6 @@ public class StartDataFrameTransformAction extends Action<StartDataFrameTransformAction.Response> {
         super(NAME);
     }
 
-    @Override
-    public Response newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<Response> getResponseReader() {
         return Response::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java
index fe453200fb2..8e7e393c6c6 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java
@@ -32,11 +32,6 @@ public class StartDataFrameTransformTaskAction extends Action<StartDataFrameTransformTaskAction.Response> {
         super(NAME);
     }
 
-    @Override
-    public Response newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<Response> getResponseReader() {
         return Response::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java
index 0cbe7a45b63..a3f79c5bc74 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java
@@ -43,11 +43,6 @@ public class StopDataFrameTransformAction extends Action<StopDataFrameTransformAction.Response> {
         super(NAME);
     }
 
-    @Override
-    public Response newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<Response> getResponseReader() {
         return Response::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java
index 28aa09f6c1e..854e90fb2c3 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java
@@ -6,10 +6,10 @@
 package org.elasticsearch.xpack.core.deprecation;
 
 import org.elasticsearch.Version;
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.IndicesRequest;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
@@ -41,7 +41,7 @@ import java.util.stream.Collectors;
 
 import static org.elasticsearch.action.ValidateActions.addValidationError;
 
-public class DeprecationInfoAction extends Action<DeprecationInfoAction.Response> {
+public class DeprecationInfoAction extends StreamableResponseAction<DeprecationInfoAction.Response> {
 
     public static final DeprecationInfoAction INSTANCE = new DeprecationInfoAction();
     public static final String NAME = "cluster:admin/xpack/deprecation/info";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckAction.java
index e27d70f9727..b7921ba9f4e 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/NodesDeprecationCheckAction.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.core.deprecation;
 
 import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.action.support.nodes.BaseNodeRequest;
 import org.elasticsearch.action.support.nodes.BaseNodeResponse;
 import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder;
@@ -23,7 +24,7 @@ import java.util.Objects;
  * Runs deprecation checks on each node. Deprecation checks are performed locally so that filtered settings
  * can be accessed in the deprecation checks.
 */
-public class NodesDeprecationCheckAction extends Action<NodesDeprecationCheckResponse> {
+public class NodesDeprecationCheckAction extends StreamableResponseAction<NodesDeprecationCheckResponse> {
 
     public static final NodesDeprecationCheckAction INSTANCE = new NodesDeprecationCheckAction();
     public static final String NAME = "cluster:admin/xpack/deprecation/nodes/info";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java
index e4fd8d04351..31725b3c0c2 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java
@@ -5,10 +5,10 @@
  */
 package org.elasticsearch.xpack.core.graph.action;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse;
 
-public class GraphExploreAction extends Action<GraphExploreResponse> {
+public class GraphExploreAction extends StreamableResponseAction<GraphExploreResponse> {
 
     public static final GraphExploreAction INSTANCE = new GraphExploreAction();
     public static final String NAME = "indices:data/read/xpack/graph/explore";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleAction.java
index ba0b5598403..0416925f6e0 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleAction.java
@@ -27,11 +27,6 @@ public class DeleteLifecycleAction extends Action<DeleteLifecycleAction.Response> {
         super(NAME);
     }
 
-    @Override
-    public Response newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<Response> getResponseReader() {
         return Response::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/ExplainLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/ExplainLifecycleAction.java
index 5acbbcb4967..b08ea01ac66 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/ExplainLifecycleAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/ExplainLifecycleAction.java
@@ -6,10 +6,10 @@
 
 package org.elasticsearch.xpack.core.indexlifecycle.action;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleResponse;
 
-public class ExplainLifecycleAction extends Action<ExplainLifecycleResponse> {
+public class ExplainLifecycleAction extends StreamableResponseAction<ExplainLifecycleResponse> {
 
     public static final ExplainLifecycleAction INSTANCE = new ExplainLifecycleAction();
     public static final String NAME = "indices:admin/ilm/explain";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleAction.java
index aaa295354a8..3d346384702 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleAction.java
@@ -6,9 +6,9 @@
 
 package org.elasticsearch.xpack.core.indexlifecycle.action;
 
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.action.support.master.AcknowledgedRequest;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -23,7 +23,7 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Objects;
 
-public class GetLifecycleAction extends Action<GetLifecycleAction.Response> {
+public class GetLifecycleAction extends StreamableResponseAction<GetLifecycleAction.Response> {
 
     public static final GetLifecycleAction INSTANCE = new GetLifecycleAction();
     public static final String NAME = "cluster:admin/ilm/get";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetStatusAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetStatusAction.java
index 40765f0aa66..7fe301ff65e 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetStatusAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetStatusAction.java
@@ -6,9 +6,9 @@
 
 package org.elasticsearch.xpack.core.indexlifecycle.action;
 
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.action.support.master.AcknowledgedRequest;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -20,7 +20,7 @@ import org.elasticsearch.xpack.core.indexlifecycle.OperationMode;
 import java.io.IOException;
 import java.util.Objects;
 
-public class GetStatusAction extends Action<GetStatusAction.Response> {
+public class GetStatusAction extends StreamableResponseAction<GetStatusAction.Response> {
 
     public static final GetStatusAction INSTANCE = new GetStatusAction();
     public static final String NAME = "cluster:admin/ilm/operation_mode/get";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepAction.java
index 536e8534c90..d62ebd8bb9b 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepAction.java
@@ -32,11 +32,6 @@ public class MoveToStepAction extends Action<MoveToStepAction.Response> {
         super(NAME);
     }
 
-    @Override
-    public Response newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<Response> getResponseReader() {
         return Response::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleAction.java
index 5920b5d8ef5..a7ca96ba83a 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleAction.java
@@ -31,11 +31,6 @@ public class PutLifecycleAction extends Action<PutLifecycleAction.Response> {
         super(NAME);
     }
 
-    @Override
-    public Response newResponse() {
Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return Response::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyAction.java index 0e530baa57f..2de8e31d171 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyAction.java @@ -6,10 +6,10 @@ package org.elasticsearch.xpack.core.indexlifecycle.action; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.ParseField; @@ -24,7 +24,7 @@ import java.util.Arrays; import java.util.List; import java.util.Objects; -public class RemoveIndexLifecyclePolicyAction extends Action { +public class RemoveIndexLifecyclePolicyAction extends StreamableResponseAction { public static final RemoveIndexLifecyclePolicyAction INSTANCE = new RemoveIndexLifecyclePolicyAction(); public static final String NAME = "indices:admin/ilm/remove_policy"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryAction.java index 25cce3e5cf0..227c131d249 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryAction.java @@ -30,11 +30,6 @@ public class RetryAction extends Action { super(NAME); } - @Override - public Response newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return Response::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StartILMAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StartILMAction.java index a55d14e3f36..b0910ee399e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StartILMAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StartILMAction.java @@ -18,11 +18,6 @@ public class StartILMAction extends Action { super(NAME); } - @Override - public AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return AcknowledgedResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StopILMAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StopILMAction.java index fc4a7a469e4..7345b9652f6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StopILMAction.java +++ 
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StopILMAction.java
@@ -18,11 +18,6 @@ public class StopILMAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CloseJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CloseJobAction.java
index 9bc413b2e22..b8267b2f9f2 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CloseJobAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CloseJobAction.java
@@ -36,11 +36,6 @@ public class CloseJobAction extends Action<CloseJobAction.Response> {
         super(NAME);
     }
 
-    @Override
-    public Response newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<Response> getResponseReader() {
         return Response::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarAction.java
index dac62ba09a5..cee7b3cf0f4 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarAction.java
@@ -29,11 +29,6 @@ public class DeleteCalendarAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventAction.java
index 07c1575716c..a97ae6e69b8 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventAction.java
@@ -30,11 +30,6 @@ public class DeleteCalendarEventAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDataFrameAnalyticsAction.java
index 9a777b23a4b..b7090051a3d 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDataFrameAnalyticsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDataFrameAnalyticsAction.java
@@ -31,11 +31,6 @@ public class DeleteDataFrameAnalyticsAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java
index 4b7de0d912f..04aa405c6f6 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java
@@ -32,11 +32,6 @@ public class DeleteDatafeedAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteExpiredDataAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteExpiredDataAction.java
index 271d8ad5fa3..f9a661827b6 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteExpiredDataAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteExpiredDataAction.java
@@ -5,11 +5,11 @@
  */
 package org.elasticsearch.xpack.core.ml.action;
 
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -20,7 +20,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import java.io.IOException;
 import java.util.Objects;
 
-public class DeleteExpiredDataAction extends Action<DeleteExpiredDataAction.Response> {
+public class DeleteExpiredDataAction extends StreamableResponseAction<DeleteExpiredDataAction.Response> {
 
     public static final DeleteExpiredDataAction INSTANCE = new DeleteExpiredDataAction();
     public static final String NAME = "cluster:admin/xpack/ml/delete_expired_data";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteFilterAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteFilterAction.java
index 6f1a6e72613..564e6f59534 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteFilterAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteFilterAction.java
@@ -30,11 +30,6 @@ public class DeleteFilterAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteForecastAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteForecastAction.java
index 658cc8befee..055ae54702c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteForecastAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteForecastAction.java
@@ -29,11 +29,6 @@ public class DeleteForecastAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java
index 37dcb8450f6..24c403f9397 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java
@@ -33,11 +33,6 @@ public class DeleteJobAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteModelSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteModelSnapshotAction.java
index 1a5181898a4..850543cf950 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteModelSnapshotAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteModelSnapshotAction.java
@@ -29,11 +29,6 @@ public class DeleteModelSnapshotAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameAction.java
index eec58428d55..f8a8efebb1c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameAction.java
@@ -5,11 +5,11 @@
  */
 package org.elasticsearch.xpack.core.ml.action;
 
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.Strings;
@@ -29,7 +29,7 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Objects;
 
-public class EvaluateDataFrameAction extends Action<EvaluateDataFrameAction.Response> {
+public class EvaluateDataFrameAction extends StreamableResponseAction<EvaluateDataFrameAction.Response> {
 
     public static final EvaluateDataFrameAction INSTANCE = new EvaluateDataFrameAction();
     public static final String NAME = "cluster:monitor/xpack/ml/data_frame/evaluate";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FinalizeJobExecutionAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FinalizeJobExecutionAction.java
index 4e081ad1cee..bd9e0107c4b 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FinalizeJobExecutionAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FinalizeJobExecutionAction.java
@@ -26,11 +26,6 @@ public class FinalizeJobExecutionAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureAction.java
index 5961a2305ea..beffcd08f25 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureAction.java
@@ -6,11 +6,11 @@
 package org.elasticsearch.xpack.core.ml.action;
 
 import org.elasticsearch.Version;
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -31,7 +31,7 @@ import java.util.Objects;
 
 import static org.elasticsearch.action.ValidateActions.addValidationError;
 
-public class FindFileStructureAction extends Action<FindFileStructureAction.Response> {
+public class FindFileStructureAction extends StreamableResponseAction<FindFileStructureAction.Response> {
 
     public static final FindFileStructureAction INSTANCE = new FindFileStructureAction();
     public static final String NAME = "cluster:monitor/xpack/ml/findfilestructure";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java
index b73d2b502a4..e54be42ba34 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java
@@ -34,11 +34,6 @@ public class FlushJobAction extends Action<FlushJobAction.Response> {
         super(NAME);
     }
 
-    @Override
-    public Response newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<Response> getResponseReader() {
         return Response::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ForecastJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ForecastJobAction.java
index fb107579c6e..16010047f15 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ForecastJobAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ForecastJobAction.java
@@ -33,11 +33,6 @@ public class ForecastJobAction extends Action<ForecastJobAction.Response> {
         super(NAME);
     }
 
-    @Override
-    public Response newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<Response> getResponseReader() {
         return Response::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java
index f0be0f74bfd..a75fc44e270 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java
@@ -5,10 +5,10 @@
  */
 package org.elasticsearch.xpack.core.ml.action;
 
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -28,7 +28,7 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
 import java.io.IOException;
 import java.util.Objects;
 
-public class GetBucketsAction extends Action<GetBucketsAction.Response> {
+public class GetBucketsAction extends StreamableResponseAction<GetBucketsAction.Response> {
 
     public static final GetBucketsAction INSTANCE = new GetBucketsAction();
     public static final String NAME = "cluster:monitor/xpack/ml/job/results/buckets/get";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarEventsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarEventsAction.java
index c2d25590513..342fab6a77c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarEventsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarEventsAction.java
@@ -5,10 +5,10 @@
  */
 package org.elasticsearch.xpack.core.ml.action;
 
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.action.ValidateActions;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.ParseField;
@@ -29,7 +29,7 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
 import java.io.IOException;
 import java.util.Objects;
 
-public class GetCalendarEventsAction extends Action<GetCalendarEventsAction.Response> {
+public class GetCalendarEventsAction extends StreamableResponseAction<GetCalendarEventsAction.Response> {
 
     public static final GetCalendarEventsAction INSTANCE = new GetCalendarEventsAction();
     public static final String NAME = "cluster:monitor/xpack/ml/calendars/events/get";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarsAction.java
index 3165e7f29ca..d172f88116f 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarsAction.java
@@ -5,10 +5,10 @@
  */
 package org.elasticsearch.xpack.core.ml.action;
 
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -28,7 +28,7 @@ import java.util.Objects;
 
 import static org.elasticsearch.action.ValidateActions.addValidationError;
 
-public class GetCalendarsAction extends Action<GetCalendarsAction.Response> {
+public class GetCalendarsAction extends StreamableResponseAction<GetCalendarsAction.Response> {
 
     public static final GetCalendarsAction INSTANCE = new GetCalendarsAction();
     public static final String NAME = "cluster:monitor/xpack/ml/calendars/get";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesAction.java
index 3048f7b2bdd..b35fdc321de 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesAction.java
@@ -5,10 +5,10 @@
  */
 package org.elasticsearch.xpack.core.ml.action;
 
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -29,7 +29,7 @@ import java.util.Objects;
 
 import static org.elasticsearch.action.ValidateActions.addValidationError;
 
-public class GetCategoriesAction extends Action<GetCategoriesAction.Response> {
+public class GetCategoriesAction extends StreamableResponseAction<GetCategoriesAction.Response> {
 
     public static final GetCategoriesAction INSTANCE = new GetCategoriesAction();
     public static final String NAME = "cluster:monitor/xpack/ml/job/results/categories/get";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsAction.java
index 92233fbb276..12772fd0962 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsAction.java
@@ -5,8 +5,8 @@
  */
 package org.elasticsearch.xpack.core.ml.action;
 
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -18,7 +18,7 @@ import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig;
 import java.io.IOException;
 import java.util.Collections;
 
-public class GetDataFrameAnalyticsAction extends Action<GetDataFrameAnalyticsAction.Response> {
+public class GetDataFrameAnalyticsAction extends StreamableResponseAction<GetDataFrameAnalyticsAction.Response> {
 
     public static final GetDataFrameAnalyticsAction INSTANCE = new GetDataFrameAnalyticsAction();
     public static final String NAME = "cluster:admin/xpack/ml/data_frame/analytics/get";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsAction.java
index b14feaa8839..878b2c2d0dd 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsAction.java
@@ -44,11 +44,6 @@ public class GetDataFrameAnalyticsStatsAction extends Action<GetDataFrameAnalyticsStatsAction.Response> {
         super(NAME);
     }
 
-    @Override
-    public Response newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<Response> getResponseReader() {
         return Response::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java
index 59589fa34ef..4988a8a02cf 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java
@@ -6,8 +6,8 @@
 package org.elasticsearch.xpack.core.ml.action;
 
 import org.elasticsearch.Version;
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
 import org.elasticsearch.action.support.master.MasterNodeReadRequest;
 import org.elasticsearch.client.ElasticsearchClient;
@@ -23,7 +23,7 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
 import java.io.IOException;
 import java.util.Objects;
 
-public class GetDatafeedsAction extends Action<GetDatafeedsAction.Response> {
+public class GetDatafeedsAction extends StreamableResponseAction<GetDatafeedsAction.Response> {
 
     public static final GetDatafeedsAction INSTANCE = new GetDatafeedsAction();
     public static final String NAME = "cluster:monitor/xpack/ml/datafeeds/get";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java
index 6dbb86fbcd0..53c22d80dec 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java
@@ -6,8 +6,8 @@
 package org.elasticsearch.xpack.core.ml.action;
 
 import org.elasticsearch.Version;
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
 import org.elasticsearch.action.support.master.MasterNodeReadRequest;
 import org.elasticsearch.client.ElasticsearchClient;
@@ -29,7 +29,7 @@ import java.io.IOException;
 import java.util.Map;
 import java.util.Objects;
 
-public class GetDatafeedsStatsAction extends Action<GetDatafeedsStatsAction.Response> {
+public class GetDatafeedsStatsAction extends StreamableResponseAction<GetDatafeedsStatsAction.Response> {
 
     public static final GetDatafeedsStatsAction INSTANCE = new GetDatafeedsStatsAction();
     public static final String NAME = "cluster:monitor/xpack/ml/datafeeds/stats/get";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetFiltersAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetFiltersAction.java
index d63dfc39d73..5ad21ccd0d1 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetFiltersAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetFiltersAction.java
@@ -5,9 +5,9 @@
  */
 package org.elasticsearch.xpack.core.ml.action;
 
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.xcontent.StatusToXContentObject;
 import org.elasticsearch.rest.RestStatus;
@@ -17,11 +17,10 @@ import org.elasticsearch.xpack.core.action.util.PageParams;
 import org.elasticsearch.xpack.core.action.util.QueryPage;
 import org.elasticsearch.xpack.core.ml.job.config.MlFilter;
 
-
 import static org.elasticsearch.action.ValidateActions.addValidationError;
 
-public class GetFiltersAction extends Action<GetFiltersAction.Response> {
+public class GetFiltersAction extends StreamableResponseAction<GetFiltersAction.Response> {
 
     public static final GetFiltersAction INSTANCE = new GetFiltersAction();
     public static final String NAME = "cluster:admin/xpack/ml/filters/get";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersAction.java
index a15336a97c0..5c0e79e1fb0 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersAction.java
@@ -5,10 +5,10 @@
  */
 package org.elasticsearch.xpack.core.ml.action;
 
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -27,8 +27,7 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
 import java.io.IOException;
 import java.util.Objects;
 
-public class GetInfluencersAction
-extends Action<GetInfluencersAction.Response> {
+public class GetInfluencersAction extends StreamableResponseAction<GetInfluencersAction.Response> {
 
     public static final GetInfluencersAction INSTANCE = new GetInfluencersAction();
     public static final String NAME = "cluster:monitor/xpack/ml/job/results/influencers/get";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsAction.java
index 18428eff137..30d031f1571 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsAction.java
@@ -6,8 +6,8 @@
 package org.elasticsearch.xpack.core.ml.action;
 
 import org.elasticsearch.Version;
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
 import org.elasticsearch.action.support.master.MasterNodeReadRequest;
 import org.elasticsearch.client.ElasticsearchClient;
@@ -23,7 +23,7 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
 import java.io.IOException;
 import java.util.Objects;
 
-public class GetJobsAction extends Action<GetJobsAction.Response> {
+public class GetJobsAction extends StreamableResponseAction<GetJobsAction.Response> {
 
     public static final GetJobsAction INSTANCE = new GetJobsAction();
     public static final String NAME = "cluster:monitor/xpack/ml/job/get";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java
index f76665b703a..c31e58eed4f 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java
@@ -58,11 +58,6 @@ public class GetJobsStatsAction extends Action<GetJobsStatsAction.Response> {
         super(NAME);
     }
 
-    @Override
-    public Response newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<Response> getResponseReader() {
         return Response::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsAction.java
index 1fc93b68e1a..edbb6f506a6 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsAction.java
@@ -5,10 +5,10 @@
  */
 package org.elasticsearch.xpack.core.ml.action;
 
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
@@ -28,7 +28,7 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
 import java.io.IOException;
 import java.util.Objects;
 
-public class GetModelSnapshotsAction extends Action<GetModelSnapshotsAction.Response> {
+public class GetModelSnapshotsAction extends StreamableResponseAction<GetModelSnapshotsAction.Response> {
 
     public static final GetModelSnapshotsAction INSTANCE = new GetModelSnapshotsAction();
     public static final String NAME = "cluster:monitor/xpack/ml/job/model_snapshots/get";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsAction.java
index da1bc74da48..8f35319d248 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsAction.java
@@ -6,10 +6,10 @@
 package org.elasticsearch.xpack.core.ml.action;
 
 import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -47,7 +47,7 @@ import java.util.function.LongSupplier;
 * the interval.
 *
 */
-public class GetOverallBucketsAction extends Action<GetOverallBucketsAction.Response> {
+public class GetOverallBucketsAction extends StreamableResponseAction<GetOverallBucketsAction.Response> {
 
     public static final GetOverallBucketsAction INSTANCE = new GetOverallBucketsAction();
     public static final String NAME = "cluster:monitor/xpack/ml/job/results/overall_buckets/get";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetRecordsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetRecordsAction.java
index 46eeca8c700..41d7447fc5b 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetRecordsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetRecordsAction.java
@@ -5,10 +5,10 @@
  */
 package org.elasticsearch.xpack.core.ml.action;
 
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -27,7 +27,7 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
 import java.io.IOException;
 import java.util.Objects;
 
-public class GetRecordsAction extends Action<GetRecordsAction.Response> {
+public class GetRecordsAction extends StreamableResponseAction<GetRecordsAction.Response> {
 
     public static final GetRecordsAction INSTANCE = new GetRecordsAction();
     public static final String NAME = "cluster:monitor/xpack/ml/job/results/records/get";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/IsolateDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/IsolateDatafeedAction.java
index 8f681472ee8..33667fd27f8 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/IsolateDatafeedAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/IsolateDatafeedAction.java
@@ -44,11 +44,6 @@ public class IsolateDatafeedAction extends Action<IsolateDatafeedAction.Response> {
         super(NAME);
     }
 
-    @Override
-    public Response newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<Response> getResponseReader() {
         return Response::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/KillProcessAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/KillProcessAction.java
index df54d693f75..f3f35f98ba1 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/KillProcessAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/KillProcessAction.java
@@ -25,11 +25,6 @@ public class KillProcessAction extends Action<KillProcessAction.Response> {
         super(NAME);
     }
 
-    @Override
-    public Response newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<Response> getResponseReader() {
         return Response::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlInfoAction.java
index b0d635202c9..43db4a05784 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlInfoAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlInfoAction.java
@@ -5,11 +5,11 @@
  */
 package org.elasticsearch.xpack.core.ml.action;
 
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -21,7 +21,7 @@ import java.util.Collections;
 import java.util.Map;
 import java.util.Objects;
 
-public class MlInfoAction extends Action<MlInfoAction.Response> {
+public class MlInfoAction extends StreamableResponseAction<MlInfoAction.Response> {
 
     public static final MlInfoAction INSTANCE = new MlInfoAction();
     public static final String NAME = "cluster:monitor/xpack/ml/info/get";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java
index aed6ac854ad..313e4784989 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java
@@ -44,11 +44,6 @@ public class OpenJobAction extends Action<AcknowledgedResponse> {
         super(NAME);
     }
 
-    @Override
-    public AcknowledgedResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<AcknowledgedResponse> getResponseReader() {
         return AcknowledgedResponse::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PersistJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PersistJobAction.java
index 3de585efc46..6520e669e71 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PersistJobAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PersistJobAction.java
@@ -25,11 +25,6 @@ public class PersistJobAction extends Action<PersistJobAction.Response> {
         super(NAME);
     }
 
-    @Override
-    public Response newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<Response> getResponseReader() {
         return Response::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java
index beff26eb34d..3df8769d338 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java
@@ -6,11 +6,11 @@
 package org.elasticsearch.xpack.core.ml.action;
 
 import org.elasticsearch.Version;
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -30,7 +30,7 @@ import java.util.List;
 import java.util.Objects;
 import java.util.stream.Collectors;
 
-public class PostCalendarEventsAction extends Action<PostCalendarEventsAction.Response> {
+public class PostCalendarEventsAction extends StreamableResponseAction<PostCalendarEventsAction.Response> {
 
     public static final PostCalendarEventsAction INSTANCE = new PostCalendarEventsAction();
     public static final String NAME = "cluster:admin/xpack/ml/calendars/events/post";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostDataAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostDataAction.java
index 0393f2c4639..2b814f64cb9 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostDataAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostDataAction.java
@@ -33,11 +33,6 @@ public class PostDataAction extends Action<PostDataAction.Response> {
         super(NAME);
     }
 
-    @Override
-    public Response newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<Response> getResponseReader() {
         return Response::new;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java
index af8a99b9828..b1a48aea5d8 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java
@@ -5,11 +5,11 @@
  */
 package org.elasticsearch.xpack.core.ml.action;
 
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -25,7 +25,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.Objects;
 
-public class PreviewDatafeedAction extends Action<PreviewDatafeedAction.Response> {
+public class PreviewDatafeedAction extends StreamableResponseAction<PreviewDatafeedAction.Response> {
 
     public static final PreviewDatafeedAction INSTANCE = new PreviewDatafeedAction();
     public static final String NAME = "cluster:admin/xpack/ml/datafeeds/preview";
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutCalendarAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutCalendarAction.java
index 345c4f1a96d..5be5722d72f 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutCalendarAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutCalendarAction.java
@@ -6,11 +6,11 @@
 package org.elasticsearch.xpack.core.ml.action;
 
 import org.elasticsearch.Version;
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -28,7 +28,7 @@ import java.util.Objects;
 
 import static org.elasticsearch.action.ValidateActions.addValidationError;
 
-public class PutCalendarAction extends Action<PutCalendarAction.Response> {
+public class PutCalendarAction extends StreamableResponseAction<PutCalendarAction.Response> {
 
PutCalendarAction INSTANCE = new PutCalendarAction(); public static final String NAME = "cluster:admin/xpack/ml/calendars/put"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsAction.java index e447aa70109..6034257eb8c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsAction.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; @@ -23,7 +23,7 @@ import org.elasticsearch.xpack.core.ml.job.messages.Messages; import java.io.IOException; import java.util.Objects; -public class PutDataFrameAnalyticsAction extends Action { +public class PutDataFrameAnalyticsAction extends StreamableResponseAction { public static final PutDataFrameAnalyticsAction INSTANCE = new PutDataFrameAnalyticsAction(); public static final String NAME = "cluster:admin/xpack/ml/data_frame/analytics/put"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java index 448d8269735..521a4a9b5e7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java @@ -6,9 +6,9 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.Version; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; @@ -22,7 +22,7 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import java.io.IOException; import java.util.Objects; -public class PutDatafeedAction extends Action { +public class PutDatafeedAction extends StreamableResponseAction { public static final PutDatafeedAction INSTANCE = new PutDatafeedAction(); public static final String NAME = "cluster:admin/xpack/ml/datafeeds/put"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutFilterAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutFilterAction.java index 0ed5e8f22aa..940f5afd24d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutFilterAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutFilterAction.java @@ -5,11 +5,11 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; 
import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -25,7 +25,7 @@ import java.io.IOException; import java.util.Objects; -public class PutFilterAction extends Action { +public class PutFilterAction extends StreamableResponseAction { public static final PutFilterAction INSTANCE = new PutFilterAction(); public static final String NAME = "cluster:admin/xpack/ml/filters/put"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java index dc3983644f7..d34ecaea9c1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java @@ -6,9 +6,9 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.Version; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; @@ -25,7 +25,7 @@ import java.io.IOException; import java.util.List; import java.util.Objects; -public class PutJobAction extends Action { +public class PutJobAction extends StreamableResponseAction { public static final PutJobAction INSTANCE = new PutJobAction(); public static final String NAME = "cluster:admin/xpack/ml/job/put"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java index 316598b6ab5..2844d9b2a8a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java @@ -6,9 +6,9 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.Version; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; @@ -29,7 +29,7 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import java.io.IOException; import java.util.Objects; -public class RevertModelSnapshotAction extends Action { +public class RevertModelSnapshotAction extends StreamableResponseAction { public static final RevertModelSnapshotAction INSTANCE = new RevertModelSnapshotAction(); public static final String NAME = "cluster:admin/xpack/ml/job/model_snapshots/revert"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeAction.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeAction.java index b3a595d6d11..43737b12e53 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeAction.java @@ -31,11 +31,6 @@ public class SetUpgradeModeAction extends Action { super(NAME); } - @Override - public AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return AcknowledgedResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDataFrameAnalyticsAction.java index d722198bdfa..fc5a22f2c6e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDataFrameAnalyticsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDataFrameAnalyticsAction.java @@ -43,11 +43,6 @@ public class StartDataFrameAnalyticsAction extends Action { super(NAME); } - @Override - public AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return AcknowledgedResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java index f7be0f0faa8..694b2b6104d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java @@ -52,11 +52,6 @@ public class StartDatafeedAction extends Action { super(NAME); } - @Override - public AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return AcknowledgedResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDataFrameAnalyticsAction.java index 43d382147fd..f7010a5b43f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDataFrameAnalyticsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDataFrameAnalyticsAction.java @@ -42,11 +42,6 @@ public class StopDataFrameAnalyticsAction extends Action { super(NAME); } - @Override - public Response newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return Response::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java index c914150173b..d91ed19a35d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java @@ -39,11 +39,6 @@ public class StopDatafeedAction extends Action { super(NAME); } - @Override - public Response newResponse() { - throw new UnsupportedOperationException("usage of 
Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return Response::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobAction.java index e70a2e3189b..e4591860c63 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobAction.java @@ -5,10 +5,10 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -18,7 +18,7 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import java.io.IOException; import java.util.Objects; -public class UpdateCalendarJobAction extends Action { +public class UpdateCalendarJobAction extends StreamableResponseAction { public static final UpdateCalendarJobAction INSTANCE = new UpdateCalendarJobAction(); public static final String NAME = "cluster:admin/xpack/ml/calendars/jobs/update"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedAction.java index 6ba34efa839..920c2861af5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedAction.java @@ -5,8 +5,8 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; @@ -20,7 +20,7 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedUpdate; import java.io.IOException; import java.util.Objects; -public class UpdateDatafeedAction extends Action { +public class UpdateDatafeedAction extends StreamableResponseAction { public static final UpdateDatafeedAction INSTANCE = new UpdateDatafeedAction(); public static final String NAME = "cluster:admin/xpack/ml/datafeeds/update"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterAction.java index 57b3d3457d7..e4869b4cb32 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterAction.java @@ -5,10 +5,10 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import 
org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; @@ -32,7 +32,7 @@ import java.util.SortedSet; import java.util.TreeSet; -public class UpdateFilterAction extends Action { +public class UpdateFilterAction extends StreamableResponseAction { public static final UpdateFilterAction INSTANCE = new UpdateFilterAction(); public static final String NAME = "cluster:admin/xpack/ml/filters/update"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java index 85e1615c0df..d55321987c9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java @@ -6,8 +6,8 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.Version; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; @@ -24,7 +24,7 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import java.io.IOException; import java.util.Objects; -public class UpdateJobAction extends Action { +public class UpdateJobAction extends StreamableResponseAction { public static final UpdateJobAction INSTANCE = new UpdateJobAction(); public static final String NAME = "cluster:admin/xpack/ml/job/update"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotAction.java index 1414719693f..b450280f5ee 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotAction.java @@ -5,11 +5,11 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; @@ -29,7 +29,7 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import java.io.IOException; import java.util.Objects; -public class UpdateModelSnapshotAction extends Action { +public class UpdateModelSnapshotAction extends StreamableResponseAction { public static final UpdateModelSnapshotAction INSTANCE = new UpdateModelSnapshotAction(); public static final String NAME = "cluster:admin/xpack/ml/job/model_snapshots/update"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java index 
5091ff1f968..fb4519f7779 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java @@ -33,11 +33,6 @@ public class UpdateProcessAction extends Action { super(NAME); } - @Override - public Response newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return Response::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateDetectorAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateDetectorAction.java index 84ea265fba6..4d6c61d8ebc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateDetectorAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateDetectorAction.java @@ -31,11 +31,6 @@ public class ValidateDetectorAction extends Action { super(NAME); } - @Override - public AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return AcknowledgedResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java index 48da2603c70..80a7f9fd6ca 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java @@ -32,11 +32,6 @@ public class ValidateJobConfigAction extends Action { super(NAME); } - @Override - public AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return AcknowledgedResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkAction.java index 49fb085191e..3cb68b78177 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkAction.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.xpack.core.monitoring.action; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class MonitoringBulkAction extends Action { +public class MonitoringBulkAction extends StreamableResponseAction { public static final MonitoringBulkAction INSTANCE = new MonitoringBulkAction(); public static final String NAME = "cluster:admin/xpack/monitoring/bulk"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/DeleteRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/DeleteRollupJobAction.java index 92a1a07ded0..a17c5314b1c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/DeleteRollupJobAction.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/DeleteRollupJobAction.java @@ -38,11 +38,6 @@ public class DeleteRollupJobAction extends Action { super(NAME); } - @Override - public Response newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return Response::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java index f544c21a15c..fb2345847fe 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java @@ -5,11 +5,11 @@ */ package org.elasticsearch.xpack.core.rollup.action; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.ParseField; @@ -17,7 +17,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -28,7 +27,7 @@ import java.util.Collections; import java.util.Map; import java.util.Objects; -public class GetRollupCapsAction extends Action { +public class GetRollupCapsAction extends StreamableResponseAction { public static final GetRollupCapsAction INSTANCE = new GetRollupCapsAction(); public static final String NAME = "cluster:monitor/xpack/rollup/get/caps";
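
The hunks in this patch repeat one of two mechanical changes, so the pattern is worth spelling out once. A minimal sketch of the first variant, for actions whose response already implements Writeable; FooAction and FooResponse are hypothetical names used for illustration, not classes from this patch:

    import org.elasticsearch.action.Action;
    import org.elasticsearch.common.io.stream.Writeable;

    public class FooAction extends Action<FooResponse> {

        public static final FooAction INSTANCE = new FooAction();
        public static final String NAME = "cluster:admin/xpack/foo";

        private FooAction() {
            super(NAME);
        }

        // Before this patch the class also carried a newResponse() stub that
        // only threw UnsupportedOperationException("usage of Streamable is
        // to be replaced by Writeable"). The stub is deleted, and only the
        // Writeable-based reader remains.
        @Override
        public Writeable.Reader<FooResponse> getResponseReader() {
            return FooResponse::new; // deserializes the response from a StreamInput
        }
    }

The second variant covers actions whose response is still Streamable: they switch their base class from Action to StreamableResponseAction, which keeps a working newResponse(), as the RefreshTokenAction hunk further down shows.
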
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java index 9dcd673c39f..60bc70247a9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java @@ -6,12 +6,12 @@ package org.elasticsearch.xpack.core.rollup.action; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.ParseField; @@ -19,7 +19,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -31,7 +30,7 @@ import java.util.Collections; import java.util.Map; import java.util.Objects; -public class GetRollupIndexCapsAction extends Action { +public class GetRollupIndexCapsAction extends StreamableResponseAction { public static final GetRollupIndexCapsAction INSTANCE = new GetRollupIndexCapsAction(); public static final String NAME = "indices:data/read/xpack/rollup/get/index/caps"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java index 913e544e741..13bd4c231e1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java @@ -47,11 +47,6 @@ public class GetRollupJobsAction extends Action { super(NAME); } - @Override - public Response newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return Response::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java index d9900e53eff..2fb2444ebfc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java @@ -35,11 +35,6 @@ public class PutRollupJobAction extends Action { super(NAME); } - @Override - public AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return AcknowledgedResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchAction.java index c6eecca5e3d..0e80c9edf2d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchAction.java @@ -21,11 +21,6 @@ public class RollupSearchAction extends Action { super(NAME); } - @Override - public SearchResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return SearchResponse::new;
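
Deleting the stubs is safe because, once getResponseReader() is implemented, the transport layer builds the response by invoking the reader on the incoming StreamInput; the newResponse() plus readFrom() path is never taken. A sketch of the response side of that contract, assuming a hypothetical FooResponse with a single flag:

    import java.io.IOException;

    import org.elasticsearch.action.ActionResponse;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    public class FooResponse extends ActionResponse {

        private final boolean acknowledged;

        public FooResponse(boolean acknowledged) {
            this.acknowledged = acknowledged;
        }

        // This constructor is what makes the method reference
        // FooResponse::new usable as a Writeable.Reader<FooResponse>.
        public FooResponse(StreamInput in) throws IOException {
            this.acknowledged = in.readBoolean();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeBoolean(acknowledged); // must mirror the read order above
        }
    }
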
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StartRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StartRollupJobAction.java index ca2a5cd8d72..dbe5a1ea59c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StartRollupJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StartRollupJobAction.java @@ -33,11 +33,6 @@ public class StartRollupJobAction extends Action { super(NAME); } - @Override - public Response newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return Response::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java index 6fc079e0328..d2c70304f38 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java @@ -40,11 +40,6 @@ public class StopRollupJobAction extends Action { super(NAME); } - @Override - public Response newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return Response::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyAction.java index 5d211ea70b5..1497962baf6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/CreateApiKeyAction.java @@ -21,11 +21,6 @@ public final class CreateApiKeyAction extends Action { super(NAME); } - @Override - public CreateApiKeyResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return CreateApiKeyResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyAction.java index 2af331909a3..d405c78e6d2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyAction.java @@ -21,11 +21,6 @@ public final class GetApiKeyAction extends Action { super(NAME); } - @Override - public GetApiKeyResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return GetApiKeyResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyAction.java index 0f5c7e66e72..de51379fca5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyAction.java @@ -21,11 +21,6 @@ public final class InvalidateApiKeyAction extends Action { super(NAME); } - @Override - public InvalidateApiKeyResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return InvalidateApiKeyResponse::new;
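
Callers are unaffected by either variant: the action singleton still ties the request type to the response type, and the client resolves deserialization through the action itself. An illustrative call against one of the actions above; the realm name, the listener bodies, and the logger are placeholders:

    // 'client' is any ElasticsearchClient; the request invalidates the
    // API keys of a hypothetical realm.
    client.execute(InvalidateApiKeyAction.INSTANCE,
        InvalidateApiKeyRequest.usingRealmName("native"),
        ActionListener.wrap(
            response -> logger.info("invalidated {} API keys",
                response.getInvalidatedApiKeys().size()),
            e -> logger.warn("failed to invalidate API keys", e)));
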
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateAction.java index b27a71e202e..90397d41465 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateAction.java @@ -20,11 +20,6 @@ public final class OpenIdConnectAuthenticateAction extends Action { super(NAME); } - @Override - public OpenIdConnectAuthenticateResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return OpenIdConnectAuthenticateResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutAction.java index 482484a7ded..18d6f73ecb6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutAction.java @@ -17,11 +17,6 @@ public class OpenIdConnectLogoutAction extends Action { super(NAME); } - @Override - public OpenIdConnectLogoutResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return OpenIdConnectLogoutResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationAction.java index 2aa82c7286c..c1e0c3586a6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationAction.java @@ -17,11 +17,6 @@ public class OpenIdConnectPrepareAuthenticationAction extends Action { super(NAME); } - @Override - public OpenIdConnectPrepareAuthenticationResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return OpenIdConnectPrepareAuthenticationResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesAction.java index a36d2fdec74..303dc37b1b6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.security.action.privilege; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Action for deleting application privileges. 
*/ -public final class DeletePrivilegesAction extends Action { +public final class DeletePrivilegesAction extends StreamableResponseAction { public static final DeletePrivilegesAction INSTANCE = new DeletePrivilegesAction(); public static final String NAME = "cluster:admin/xpack/security/privilege/delete"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesAction.java index 0b8743228c5..b7a9af806d6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetPrivilegesAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.security.action.privilege; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Action for retrieving one or more application privileges from the security index */ -public final class GetPrivilegesAction extends Action { +public final class GetPrivilegesAction extends StreamableResponseAction { public static final GetPrivilegesAction INSTANCE = new GetPrivilegesAction(); public static final String NAME = "cluster:admin/xpack/security/privilege/get"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesAction.java index 3743bec144f..21532514bc4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.security.action.privilege; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Action for putting (adding/updating) one or more application privileges. 
*/ -public final class PutPrivilegesAction extends Action { +public final class PutPrivilegesAction extends StreamableResponseAction { public static final PutPrivilegesAction INSTANCE = new PutPrivilegesAction(); public static final String NAME = "cluster:admin/xpack/security/privilege/put"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheAction.java index 7c3cd58a7f4..481cb82b304 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheAction.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.xpack.core.security.action.realm; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class ClearRealmCacheAction extends Action { +public class ClearRealmCacheAction extends StreamableResponseAction { public static final ClearRealmCacheAction INSTANCE = new ClearRealmCacheAction(); public static final String NAME = "cluster:admin/xpack/security/realm/cache/clear"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheAction.java index 096b5380181..b5ec964d64b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.security.action.role; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * The action for clearing the cache used by native roles that are stored in an index. 
*/ -public class ClearRolesCacheAction extends Action { +public class ClearRolesCacheAction extends StreamableResponseAction { public static final ClearRolesCacheAction INSTANCE = new ClearRolesCacheAction(); public static final String NAME = "cluster:admin/xpack/security/roles/cache/clear"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleAction.java index 6130f107fb7..c58a78b7657 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.security.action.role; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Action for deleting a role from the security index */ -public class DeleteRoleAction extends Action { +public class DeleteRoleAction extends StreamableResponseAction { public static final DeleteRoleAction INSTANCE = new DeleteRoleAction(); public static final String NAME = "cluster:admin/xpack/security/role/delete"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesAction.java index 53126440afb..cc05e314b15 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.security.action.role; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Action to retrieve a role from the security index */ -public class GetRolesAction extends Action { +public class GetRolesAction extends StreamableResponseAction { public static final GetRolesAction INSTANCE = new GetRolesAction(); public static final String NAME = "cluster:admin/xpack/security/role/get"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleAction.java index 8396625e262..692429bc0e8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.security.action.role; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Action for adding a role to the security index */ -public class PutRoleAction extends Action { +public class PutRoleAction extends StreamableResponseAction { public static final PutRoleAction INSTANCE = new PutRoleAction(); public static final String NAME = "cluster:admin/xpack/security/role/put"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingAction.java index 6057daf9595..43fab6a5a6f 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingAction.java @@ -5,13 +5,13 @@ */ package org.elasticsearch.xpack.core.security.action.rolemapping; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Action for deleting a role-mapping from the * org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore */ -public class DeleteRoleMappingAction extends Action { +public class DeleteRoleMappingAction extends StreamableResponseAction { public static final DeleteRoleMappingAction INSTANCE = new DeleteRoleMappingAction(); public static final String NAME = "cluster:admin/xpack/security/role_mapping/delete"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsAction.java index e1488bf7091..39f410e42c1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsAction.java @@ -5,14 +5,14 @@ */ package org.elasticsearch.xpack.core.security.action.rolemapping; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Action to retrieve one or more role-mappings from X-Pack security * * see org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore */ -public class GetRoleMappingsAction extends Action { +public class GetRoleMappingsAction extends StreamableResponseAction { public static final GetRoleMappingsAction INSTANCE = new GetRoleMappingsAction(); public static final String NAME = "cluster:admin/xpack/security/role_mapping/get"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingAction.java index 9c3068adf12..8464c1f8177 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.security.action.rolemapping; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Action for adding a role to the security index */ -public class PutRoleMappingAction extends Action { +public class PutRoleMappingAction extends StreamableResponseAction { public static final PutRoleMappingAction INSTANCE = new PutRoleMappingAction(); public static final String NAME = "cluster:admin/xpack/security/role_mapping/put"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateAction.java index fca733a3938..7cf767fbf82 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateAction.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.security.action.saml; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Action for authenticating using SAML assertions */ -public final class SamlAuthenticateAction extends Action { +public final class SamlAuthenticateAction extends StreamableResponseAction { public static final String NAME = "cluster:admin/xpack/security/saml/authenticate"; public static final SamlAuthenticateAction INSTANCE = new SamlAuthenticateAction(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionAction.java index dc5aa096275..4b937657c67 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.security.action.saml; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Action to perform IdP-initiated logout for a SAML-SSO user */ -public final class SamlInvalidateSessionAction extends Action { +public final class SamlInvalidateSessionAction extends StreamableResponseAction { public static final String NAME = "cluster:admin/xpack/security/saml/invalidate"; public static final SamlInvalidateSessionAction INSTANCE = new SamlInvalidateSessionAction(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutAction.java index 9ea3a29ca4a..994064950db 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.security.action.saml; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Action for initiating a logout process for a SAML-SSO user */ -public final class SamlLogoutAction extends Action { +public final class SamlLogoutAction extends StreamableResponseAction { public static final String NAME = "cluster:admin/xpack/security/saml/logout"; public static final SamlLogoutAction INSTANCE = new SamlLogoutAction(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationAction.java index 12ad23ca501..035530bf752 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.security.action.saml; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Action for initiating an authentication process using 
SAML assertions */ -public final class SamlPrepareAuthenticationAction extends Action { +public final class SamlPrepareAuthenticationAction extends StreamableResponseAction { public static final String NAME = "cluster:admin/xpack/security/saml/prepare"; public static final SamlPrepareAuthenticationAction INSTANCE = new SamlPrepareAuthenticationAction(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenAction.java index 7b913f594e5..2e5f25775f5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.security.action.token; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Action for creating a new token */ -public final class CreateTokenAction extends Action { +public final class CreateTokenAction extends StreamableResponseAction { public static final String NAME = "cluster:admin/xpack/security/token/create"; public static final CreateTokenAction INSTANCE = new CreateTokenAction(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenAction.java index 57bd5bd35dd..77538144b75 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.security.action.token; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Action for invalidating one or more tokens */ -public final class InvalidateTokenAction extends Action { +public final class InvalidateTokenAction extends StreamableResponseAction { public static final String NAME = "cluster:admin/xpack/security/token/invalidate"; public static final InvalidateTokenAction INSTANCE = new InvalidateTokenAction(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/RefreshTokenAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/RefreshTokenAction.java index 3478af2ec00..f57720228a9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/RefreshTokenAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/RefreshTokenAction.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.xpack.core.security.action.token; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public final class RefreshTokenAction extends Action { +public final class RefreshTokenAction extends StreamableResponseAction { public static final String NAME = "cluster:admin/xpack/security/token/refresh"; public static final RefreshTokenAction INSTANCE = new RefreshTokenAction(); @@ -20,4 +20,4 @@ public final class RefreshTokenAction extends Action { public CreateTokenResponse newResponse() { return new CreateTokenResponse(); } -} +} \ No newline at end of file diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateAction.java index 18cfe85c8cb..b0b98023399 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateAction.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.xpack.core.security.action.user; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class AuthenticateAction extends Action { +public class AuthenticateAction extends StreamableResponseAction { public static final String NAME = "cluster:admin/xpack/security/user/authenticate"; public static final AuthenticateAction INSTANCE = new AuthenticateAction(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordAction.java index d01717a64ea..afac98c6e83 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordAction.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.xpack.core.security.action.user; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class ChangePasswordAction extends Action { +public class ChangePasswordAction extends StreamableResponseAction { public static final ChangePasswordAction INSTANCE = new ChangePasswordAction(); public static final String NAME = "cluster:admin/xpack/security/user/change_password"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserAction.java index 78666759dc0..ebd378ba1d4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.security.action.user; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Action for deleting a native user. 
*/ -public class DeleteUserAction extends Action { +public class DeleteUserAction extends StreamableResponseAction { public static final DeleteUserAction INSTANCE = new DeleteUserAction(); public static final String NAME = "cluster:admin/xpack/security/user/delete"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesAction.java index 6d51d74d899..027a5e2d1ff 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.security.action.user; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Action that lists the set of privileges held by a user. */ -public final class GetUserPrivilegesAction extends Action { +public final class GetUserPrivilegesAction extends StreamableResponseAction { public static final GetUserPrivilegesAction INSTANCE = new GetUserPrivilegesAction(); public static final String NAME = "cluster:admin/xpack/security/user/list_privileges"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersAction.java index 49532049ba9..78a0750c0b9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.security.action.user; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Action for retrieving a user from the security index */ -public class GetUsersAction extends Action { +public class GetUsersAction extends StreamableResponseAction { public static final GetUsersAction INSTANCE = new GetUsersAction(); public static final String NAME = "cluster:admin/xpack/security/user/get"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesAction.java index 30bb44a2c1c..f97006a8652 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesAction.java @@ -5,14 +5,14 @@ */ package org.elasticsearch.xpack.core.security.action.user; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; /** * This action is testing whether a user has the specified * {@link RoleDescriptor.IndicesPrivileges privileges} */ -public class HasPrivilegesAction extends Action { +public class HasPrivilegesAction extends StreamableResponseAction { public static final HasPrivilegesAction INSTANCE = new HasPrivilegesAction(); public static final String NAME = "cluster:admin/xpack/security/user/has_privileges"; diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserAction.java index 20bbde2366b..ad0ad300b50 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.security.action.user; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Action for putting (adding/updating) a native user. */ -public class PutUserAction extends Action { +public class PutUserAction extends StreamableResponseAction { public static final PutUserAction INSTANCE = new PutUserAction(); public static final String NAME = "cluster:admin/xpack/security/user/put"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledAction.java index 0368cdf7d7d..dad4d5ce2f5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.security.action.user; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * This action is for setting the enabled flag on a native or reserved user */ -public class SetEnabledAction extends Action { +public class SetEnabledAction extends StreamableResponseAction { public static final SetEnabledAction INSTANCE = new SetEnabledAction(); public static final String NAME = "cluster:admin/xpack/security/user/set_enabled"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/GetCertificateInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/GetCertificateInfoAction.java index 4e1a84773db..64e4f8730fd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/GetCertificateInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/GetCertificateInfoAction.java @@ -5,11 +5,11 @@ */ package org.elasticsearch.xpack.core.ssl.action; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -25,7 +25,7 @@ import java.util.Collection; * Action to obtain information about X.509 (SSL/TLS) certificates that are being used by X-Pack. * The primary use case is for tracking the expiry dates of certificates. 
*/ -public class GetCertificateInfoAction extends Action { +public class GetCertificateInfoAction extends StreamableResponseAction { public static final GetCertificateInfoAction INSTANCE = new GetCertificateInfoAction(); public static final String NAME = "cluster:monitor/xpack/ssl/certificates/get"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/actions/IndexUpgradeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/actions/IndexUpgradeAction.java index 89279f4ea31..370b137e572 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/actions/IndexUpgradeAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/actions/IndexUpgradeAction.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.xpack.core.upgrade.actions; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.action.support.master.MasterNodeReadRequest; @@ -26,7 +26,7 @@ import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.xpack.core.upgrade.IndexUpgradeServiceFields.UPGRADE_INDEX_OPTIONS; -public class IndexUpgradeAction extends Action { +public class IndexUpgradeAction extends StreamableResponseAction { public static final IndexUpgradeAction INSTANCE = new IndexUpgradeAction(); public static final String NAME = "cluster:admin/xpack/upgrade"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/actions/IndexUpgradeInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/actions/IndexUpgradeInfoAction.java index 3044c953a3e..9ace42634d7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/actions/IndexUpgradeInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/actions/IndexUpgradeInfoAction.java @@ -5,14 +5,14 @@ */ package org.elasticsearch.xpack.core.upgrade.actions; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoResponse; -public class IndexUpgradeInfoAction extends Action { +public class IndexUpgradeInfoAction extends StreamableResponseAction { public static final IndexUpgradeInfoAction INSTANCE = new IndexUpgradeInfoAction(); public static final String NAME = "cluster:admin/xpack/upgrade/info"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchAction.java index 04ec95a369a..1133bb7731b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchAction.java 
@@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.watcher.transport.actions.ack; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * This action acks a watch in memory, and in the index */ -public class AckWatchAction extends Action { +public class AckWatchAction extends StreamableResponseAction { public static final AckWatchAction INSTANCE = new AckWatchAction(); public static final String NAME = "cluster:admin/xpack/watcher/watch/ack"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchAction.java index 936a2171154..366aba32658 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.watcher.transport.actions.activate; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * This action activates or deactivates a watch, in memory and in the index */ -public class ActivateWatchAction extends Action { +public class ActivateWatchAction extends StreamableResponseAction { public static final ActivateWatchAction INSTANCE = new ActivateWatchAction(); public static final String NAME = "cluster:admin/xpack/watcher/watch/activate"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/delete/DeleteWatchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/delete/DeleteWatchAction.java index eb440ddc251..5572572984c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/delete/DeleteWatchAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/delete/DeleteWatchAction.java @@ -5,13 +5,13 @@ */ package org.elasticsearch.xpack.core.watcher.transport.actions.delete; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse; /** * This action deletes a watch from memory, the scheduler and the index */ -public class DeleteWatchAction extends Action { +public class DeleteWatchAction extends StreamableResponseAction { public static final DeleteWatchAction INSTANCE = new DeleteWatchAction(); public static final String NAME = "cluster:admin/xpack/watcher/watch/delete"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchAction.java index 924f1709594..e11b0aabe2f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchAction.java @@ -5,13 +5,13 @@ */ package org.elasticsearch.xpack.core.watcher.transport.actions.execute; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * This action executes a watch, either ignoring the schedule and condition or
just the schedule, and can execute a subset of the actions, * optionally persisting the history entry */ -public class ExecuteWatchAction extends Action { +public class ExecuteWatchAction extends StreamableResponseAction { public static final ExecuteWatchAction INSTANCE = new ExecuteWatchAction(); public static final String NAME = "cluster:admin/xpack/watcher/watch/execute"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchAction.java index 4df72a964b6..99bf6c3277e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchAction.java @@ -5,10 +5,12 @@ */ package org.elasticsearch.xpack.core.watcher.transport.actions.get; +import org.elasticsearch.action.StreamableResponseAction; + /** * This action gets a watch by name */ -public class GetWatchAction extends org.elasticsearch.action.Action { +public class GetWatchAction extends StreamableResponseAction { public static final GetWatchAction INSTANCE = new GetWatchAction(); public static final String NAME = "cluster:monitor/xpack/watcher/watch/get"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchAction.java index 509116b018e..d2771ca2b6c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchAction.java @@ -5,13 +5,13 @@ */ package org.elasticsearch.xpack.core.watcher.transport.actions.put; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; /** * This action puts a watch into the watch index and adds it to the scheduler */ -public class PutWatchAction extends Action { +public class PutWatchAction extends StreamableResponseAction { public static final PutWatchAction INSTANCE = new PutWatchAction(); public static final String NAME = "cluster:admin/xpack/watcher/watch/put"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceAction.java index 4f85df827d7..61d7704a609 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceAction.java @@ -19,11 +19,6 @@ public class WatcherServiceAction extends Action { super(NAME); } - @Override - public AcknowledgedResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return AcknowledgedResponse::new; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsAction.java
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsAction.java index 59fcff090f5..065e95dcb12 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.core.watcher.transport.actions.stats; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * This Action gets the stats for the watcher plugin */ -public class WatcherStatsAction extends Action { +public class WatcherStatsAction extends StreamableResponseAction { public static final WatcherStatsAction INSTANCE = new WatcherStatsAction(); public static final String NAME = "cluster:monitor/xpack/watcher/stats/dist"; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java index 999c36c7b4f..ea4c2e41359 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java @@ -140,13 +140,12 @@ public class MockClientBuilder { @SuppressWarnings({ "unchecked" }) public MockClientBuilder addIndicesDeleteResponse(String index, boolean exists, boolean exception, ActionListener actionListener) throws InterruptedException, ExecutionException, IOException { - AcknowledgedResponse response = DeleteIndexAction.INSTANCE.newResponse(); StreamInput si = mock(StreamInput.class); // this looks complicated but Mockito can't mock the final method // DeleteIndexResponse.isAcknowledged() and the only way to create // one with a true response is reading from a stream. 
when(si.readByte()).thenReturn((byte) 0x01); - response.readFrom(si); + AcknowledgedResponse response = DeleteIndexAction.INSTANCE.getResponseReader().read(si); doAnswer(invocation -> { DeleteIndexRequest deleteIndexRequest = (DeleteIndexRequest) invocation.getArguments()[0]; diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlClearCursorAction.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlClearCursorAction.java index ee824fc04e9..ccab2058374 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlClearCursorAction.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlClearCursorAction.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.xpack.sql.action; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class SqlClearCursorAction extends Action { +public class SqlClearCursorAction extends StreamableResponseAction { public static final SqlClearCursorAction INSTANCE = new SqlClearCursorAction(); public static final String NAME = "indices:data/read/sql/close_cursor"; diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryAction.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryAction.java index f25eef31d3d..d40cdc55b96 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryAction.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryAction.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.xpack.sql.action; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; -public class SqlQueryAction extends Action { +public class SqlQueryAction extends StreamableResponseAction { public static final SqlQueryAction INSTANCE = new SqlQueryAction(); public static final String NAME = "indices:data/read/sql"; diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateAction.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateAction.java index 2431ecc1edf..15958516a5d 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateAction.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateAction.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.sql.action; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseAction; /** * Sql action for translating SQL queries into ES requests */ -public class SqlTranslateAction extends Action { +public class SqlTranslateAction extends StreamableResponseAction { public static final SqlTranslateAction INSTANCE = new SqlTranslateAction(); public static final String NAME = "indices:data/read/sql/translate"; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlStatsAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlStatsAction.java index 5f29c743325..135a1c6277c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlStatsAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlStatsAction.java @@ -6,9 +6,9 @@ package org.elasticsearch.xpack.sql.plugin; -import org.elasticsearch.action.Action; +import 
org.elasticsearch.action.StreamableResponseAction; -public class SqlStatsAction extends Action { +public class SqlStatsAction extends StreamableResponseAction { public static final SqlStatsAction INSTANCE = new SqlStatsAction(); public static final String NAME = "cluster:monitor/xpack/sql/stats/dist"; From b599c68d23fe4acc20d50142b8fc03d5b00c49cf Mon Sep 17 00:00:00 2001 From: David Roberts Date: Sat, 29 Jun 2019 07:51:29 +0100 Subject: [PATCH 38/42] [ML] Assert that a no-op job creates no results or state (#43681) If a job is opened and then closed and does nothing in between then it should not persist any results or state documents. This change adapts the no-op job test to assert no results in addition to no state, and to log any documents that cause this assertion to fail. Relates elastic/ml-cpp#512 Relates #43680 --- .../xpack/ml/integration/PersistJobIT.java | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/PersistJobIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/PersistJobIT.java index a68fa2fe02a..b9666306911 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/PersistJobIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/PersistJobIT.java @@ -23,6 +23,7 @@ import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -178,13 +179,12 @@ public class PersistJobIT extends MlNativeAutodetectIntegTestCase { closeJob(jobId); // Check that state has not been persisted - SearchResponse stateDocsResponse = client().prepareSearch(AnomalyDetectorsIndex.jobStateIndexPattern()) - .setFetchSource(false) - .setTrackTotalHits(true) - .setSize(10000) - .get(); + SearchResponse stateDocsResponse = client().prepareSearch(AnomalyDetectorsIndex.jobStateIndexPattern()).get(); + assertThat(Arrays.asList(stateDocsResponse.getHits().getHits()), empty()); - assertThat(stateDocsResponse.getHits().getTotalHits().value, equalTo(0L)); + // Check that results have not been persisted + SearchResponse resultsDocsResponse = client().prepareSearch(AnomalyDetectorsIndex.jobResultsAliasedName(jobId)).get(); + assertThat(Arrays.asList(resultsDocsResponse.getHits().getHits()), empty()); deleteJob(jobId); } From 5e17bc5dcc52b1c3dc92e51a6f154547aef3d24a Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Sat, 29 Jun 2019 23:26:17 +0300 Subject: [PATCH 39/42] Consistent Secure Settings #40416 Introduces a new `ConsistentSettingsService` that publishes salted hashes of consistent secure settings from the elected master (`newHashPublisher`) and exposes a single public validation method, `areAllConsistent`. The method returns `true` if the local node's secure settings (inside the keystore) are equal to the master's, and `false` otherwise. Technically, the local node has to have exactly the same secure settings - no setting names may be missing or in surplus - for all `SecureSetting` instances that are flagged with the newly introduced `Property.Consistent`. It is worth highlighting that `areAllConsistent` is not a consensus view across the cluster, but rather the local node's perspective in relation to the master.
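To make the moving parts concrete, here is a minimal sketch (not part of this patch) of how a consistent secure setting could be declared and validated; the setting name, class name, and wiring context are hypothetical:

import java.util.Collections;
import java.util.List;

import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.ConsistentSettingsService;
import org.elasticsearch.common.settings.SecureSetting;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

public class ConsistentSettingExample {

    // Hypothetical keystore setting flagged as Consistent; SecureSetting is always
    // node-scoped, which Property.Consistent requires.
    static final Setting<SecureString> EXAMPLE_SECRET = SecureSetting.secureString(
            "xpack.notification.example.secret", null, Setting.Property.Consistent);

    static void wire(Settings settings, ClusterService clusterService) {
        List<Setting<?>> consistentSettings = Collections.singletonList(EXAMPLE_SECRET);
        ConsistentSettingsService service =
                new ConsistentSettingsService(settings, clusterService, consistentSettings);
        // The elected master publishes salted PBKDF2 hashes of its local keystore
        // values into the cluster state metadata ...
        clusterService.addLocalNodeMasterListener(service.newHashPublisher());
        // ... and any node can then compare its own keystore against the published hashes.
        boolean consistent = service.areAllConsistent();
        assert consistent : "local keystore disagrees with the master's";
    }
}

In the patch itself this wiring happens once, in `Node`, which registers the hash publisher with the `ClusterService` during node construction (see the `Node.java` hunk below).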
--- .../cluster/metadata/DiffableStringMap.java | 4 + .../cluster/metadata/MetaData.java | 50 +++- .../cluster/service/ClusterService.java | 2 +- .../common/hash/MessageDigests.java | 19 +- .../common/settings/ClusterSettings.java | 4 +- .../settings/ConsistentSettingsService.java | 257 ++++++++++++++++++ .../common/settings/KeyStoreWrapper.java | 61 +++-- .../common/settings/SecureSetting.java | 19 +- .../common/settings/SecureSettings.java | 2 + .../common/settings/Setting.java | 20 ++ .../common/settings/Settings.java | 9 +- .../common/settings/SettingsModule.java | 22 +- .../java/org/elasticsearch/node/Node.java | 4 + .../common/settings/ConsistentSettingsIT.java | 189 +++++++++++++ .../ConsistentSettingsServiceTests.java | 167 ++++++++++++ .../common/settings/KeyStoreWrapperTests.java | 23 ++ .../common/settings/SettingsModuleTests.java | 36 +++ .../common/settings/MockSecureSettings.java | 13 + .../notification/NotificationService.java | 17 +- .../NotificationServiceTests.java | 7 + 20 files changed, 886 insertions(+), 39 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/common/settings/ConsistentSettingsService.java create mode 100644 server/src/test/java/org/elasticsearch/common/settings/ConsistentSettingsIT.java create mode 100644 server/src/test/java/org/elasticsearch/common/settings/ConsistentSettingsServiceTests.java diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java index 46433eed8a6..b6e31e92698 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java @@ -39,6 +39,8 @@ import java.util.Set; */ public class DiffableStringMap extends AbstractMap implements Diffable { + public static final DiffableStringMap EMPTY = new DiffableStringMap(Collections.emptyMap()); + private final Map innerMap; DiffableStringMap(final Map map) { @@ -75,6 +77,8 @@ public class DiffableStringMap extends AbstractMap implements Di */ public static class DiffableStringMapDiff implements Diff { + public static final DiffableStringMapDiff EMPTY = new DiffableStringMapDiff(DiffableStringMap.EMPTY, DiffableStringMap.EMPTY); + private final List deletes; private final Map upserts; // diffs also become upserts diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 436de2e2e7a..16ee761af6e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -170,6 +170,7 @@ public class MetaData implements Iterable, Diffable, To private final Settings transientSettings; private final Settings persistentSettings; private final Settings settings; + private final DiffableStringMap hashesOfConsistentSettings; private final ImmutableOpenMap indices; private final ImmutableOpenMap templates; private final ImmutableOpenMap customs; @@ -184,7 +185,7 @@ public class MetaData implements Iterable, Diffable, To private final SortedMap aliasAndIndexLookup; MetaData(String clusterUUID, boolean clusterUUIDCommitted, long version, CoordinationMetaData coordinationMetaData, - Settings transientSettings, Settings persistentSettings, + Settings transientSettings, Settings persistentSettings, DiffableStringMap hashesOfConsistentSettings, ImmutableOpenMap indices, 
ImmutableOpenMap templates, ImmutableOpenMap customs, String[] allIndices, String[] allOpenIndices, String[] allClosedIndices, SortedMap aliasAndIndexLookup) { @@ -195,6 +196,7 @@ public class MetaData implements Iterable, Diffable, To this.transientSettings = transientSettings; this.persistentSettings = persistentSettings; this.settings = Settings.builder().put(persistentSettings).put(transientSettings).build(); + this.hashesOfConsistentSettings = hashesOfConsistentSettings; this.indices = indices; this.customs = customs; this.templates = templates; @@ -246,6 +248,10 @@ public class MetaData implements Iterable, Diffable, To return this.persistentSettings; } + public Map hashesOfConsistentSettings() { + return this.hashesOfConsistentSettings; + } + public CoordinationMetaData coordinationMetaData() { return this.coordinationMetaData; } @@ -767,6 +773,9 @@ public class MetaData implements Iterable, Diffable, To if (!metaData1.persistentSettings.equals(metaData2.persistentSettings)) { return false; } + if (!metaData1.hashesOfConsistentSettings.equals(metaData2.hashesOfConsistentSettings)) { + return false; + } if (!metaData1.templates.equals(metaData2.templates())) { return false; } @@ -821,6 +830,7 @@ public class MetaData implements Iterable, Diffable, To private CoordinationMetaData coordinationMetaData; private Settings transientSettings; private Settings persistentSettings; + private Diff hashesOfConsistentSettings; private Diff> indices; private Diff> templates; private Diff> customs; @@ -832,6 +842,7 @@ public class MetaData implements Iterable, Diffable, To coordinationMetaData = after.coordinationMetaData; transientSettings = after.transientSettings; persistentSettings = after.persistentSettings; + hashesOfConsistentSettings = after.hashesOfConsistentSettings.diff(before.hashesOfConsistentSettings); indices = DiffableUtils.diff(before.indices, after.indices, DiffableUtils.getStringKeySerializer()); templates = DiffableUtils.diff(before.templates, after.templates, DiffableUtils.getStringKeySerializer()); customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); @@ -850,6 +861,11 @@ public class MetaData implements Iterable, Diffable, To } transientSettings = Settings.readSettingsFromStream(in); persistentSettings = Settings.readSettingsFromStream(in); + if (in.getVersion().onOrAfter(Version.V_7_3_0)) { + hashesOfConsistentSettings = DiffableStringMap.readDiffFrom(in); + } else { + hashesOfConsistentSettings = DiffableStringMap.DiffableStringMapDiff.EMPTY; + } indices = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexMetaData::readFrom, IndexMetaData::readDiffFrom); templates = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexTemplateMetaData::readFrom, @@ -869,6 +885,9 @@ public class MetaData implements Iterable, Diffable, To } Settings.writeSettingsToStream(transientSettings, out); Settings.writeSettingsToStream(persistentSettings, out); + if (out.getVersion().onOrAfter(Version.V_7_3_0)) { + hashesOfConsistentSettings.writeTo(out); + } indices.writeTo(out); templates.writeTo(out); customs.writeTo(out); @@ -883,6 +902,7 @@ public class MetaData implements Iterable, Diffable, To builder.coordinationMetaData(coordinationMetaData); builder.transientSettings(transientSettings); builder.persistentSettings(persistentSettings); + builder.hashesOfConsistentSettings(hashesOfConsistentSettings.apply(part.hashesOfConsistentSettings)); 
builder.indices(indices.apply(part.indices)); builder.templates(templates.apply(part.templates)); builder.customs(customs.apply(part.customs)); @@ -902,6 +922,9 @@ public class MetaData implements Iterable, Diffable, To } builder.transientSettings(readSettingsFromStream(in)); builder.persistentSettings(readSettingsFromStream(in)); + if (in.getVersion().onOrAfter(Version.V_7_3_0)) { + builder.hashesOfConsistentSettings(new DiffableStringMap(in)); + } int size = in.readVInt(); for (int i = 0; i < size; i++) { builder.put(IndexMetaData.readFrom(in), false); @@ -930,6 +953,9 @@ public class MetaData implements Iterable, Diffable, To } writeSettingsToStream(transientSettings, out); writeSettingsToStream(persistentSettings, out); + if (out.getVersion().onOrAfter(Version.V_7_3_0)) { + hashesOfConsistentSettings.writeTo(out); + } out.writeVInt(indices.size()); for (IndexMetaData indexMetaData : this) { indexMetaData.writeTo(out); @@ -970,6 +996,7 @@ public class MetaData implements Iterable, Diffable, To private CoordinationMetaData coordinationMetaData = CoordinationMetaData.EMPTY_META_DATA; private Settings transientSettings = Settings.Builder.EMPTY_SETTINGS; private Settings persistentSettings = Settings.Builder.EMPTY_SETTINGS; + private DiffableStringMap hashesOfConsistentSettings = new DiffableStringMap(Collections.emptyMap()); private final ImmutableOpenMap.Builder indices; private final ImmutableOpenMap.Builder templates; @@ -989,6 +1016,7 @@ public class MetaData implements Iterable, Diffable, To this.coordinationMetaData = metaData.coordinationMetaData; this.transientSettings = metaData.transientSettings; this.persistentSettings = metaData.persistentSettings; + this.hashesOfConsistentSettings = metaData.hashesOfConsistentSettings; this.version = metaData.version; this.indices = ImmutableOpenMap.builder(metaData.indices); this.templates = ImmutableOpenMap.builder(metaData.templates); @@ -1152,6 +1180,20 @@ public class MetaData implements Iterable, Diffable, To return this; } + public DiffableStringMap hashesOfConsistentSettings() { + return this.hashesOfConsistentSettings; + } + + public Builder hashesOfConsistentSettings(DiffableStringMap hashesOfConsistentSettings) { + this.hashesOfConsistentSettings = hashesOfConsistentSettings; + return this; + } + + public Builder hashesOfConsistentSettings(Map hashesOfConsistentSettings) { + this.hashesOfConsistentSettings = new DiffableStringMap(hashesOfConsistentSettings); + return this; + } + public Builder version(long version) { this.version = version; return this; @@ -1225,8 +1267,8 @@ public class MetaData implements Iterable, Diffable, To String[] allClosedIndicesArray = allClosedIndices.toArray(new String[allClosedIndices.size()]); return new MetaData(clusterUUID, clusterUUIDCommitted, version, coordinationMetaData, transientSettings, persistentSettings, - indices.build(), templates.build(), customs.build(), allIndicesArray, allOpenIndicesArray, allClosedIndicesArray, - aliasAndIndexLookup); + hashesOfConsistentSettings, indices.build(), templates.build(), customs.build(), allIndicesArray, allOpenIndicesArray, + allClosedIndicesArray, aliasAndIndexLookup); } private SortedMap buildAliasAndIndexLookup() { @@ -1350,6 +1392,8 @@ public class MetaData implements Iterable, Diffable, To while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { builder.put(IndexMetaData.Builder.fromXContent(parser), false); } + } else if ("hashes_of_consistent_settings".equals(currentFieldName)) { + 
builder.hashesOfConsistentSettings(parser.mapStrings()); } else if ("templates".equals(currentFieldName)) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { builder.put(IndexTemplateMetaData.Builder.fromXContent(parser, parser.currentName())); diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java index f83f2606b14..fded43a4bdd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java @@ -73,7 +73,7 @@ public class ClusterService extends AbstractLifecycleComponent { } public ClusterService(Settings settings, ClusterSettings clusterSettings, MasterService masterService, - ClusterApplierService clusterApplierService) { + ClusterApplierService clusterApplierService) { this.settings = settings; this.nodeName = Node.NODE_NAME_SETTING.get(settings); this.masterService = masterService; diff --git a/server/src/main/java/org/elasticsearch/common/hash/MessageDigests.java b/server/src/main/java/org/elasticsearch/common/hash/MessageDigests.java index 8bcef7b8ff4..df8f3e2fa7f 100644 --- a/server/src/main/java/org/elasticsearch/common/hash/MessageDigests.java +++ b/server/src/main/java/org/elasticsearch/common/hash/MessageDigests.java @@ -95,15 +95,24 @@ public final class MessageDigests { * @return a hex representation of the input as a String. */ public static String toHexString(byte[] bytes) { - Objects.requireNonNull(bytes); - StringBuilder sb = new StringBuilder(2 * bytes.length); + return new String(toHexCharArray(bytes)); + } + /** + * Encodes the byte array into a newly created hex char array, without allocating any other temporary variables. + * + * @param bytes the input to be encoded as hex. + * @return the hex encoding of the input as a char array. + */ + public static char[] toHexCharArray(byte[] bytes) { + Objects.requireNonNull(bytes); + final char[] result = new char[2 * bytes.length]; for (int i = 0; i < bytes.length; i++) { byte b = bytes[i]; - sb.append(HEX_DIGITS[b >> 4 & 0xf]).append(HEX_DIGITS[b & 0xf]); + result[2 * i] = HEX_DIGITS[b >> 4 & 0xf]; + result[2 * i + 1] = HEX_DIGITS[b & 0xf]; } - - return sb.toString(); + return result; } } diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index bf210644dc5..9e4f2d55d22 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -120,12 +120,12 @@ import java.util.function.Predicate; * Encapsulates all valid cluster level settings. 
*/ public final class ClusterSettings extends AbstractScopedSettings { + public ClusterSettings(final Settings nodeSettings, final Set> settingsSet) { this(nodeSettings, settingsSet, Collections.emptySet()); } - public ClusterSettings( - final Settings nodeSettings, final Set> settingsSet, final Set> settingUpgraders) { + public ClusterSettings(final Settings nodeSettings, final Set> settingsSet, final Set> settingUpgraders) { super(nodeSettings, settingsSet, settingUpgraders, Property.NodeScope); addSettingsUpdater(new LoggingSettingUpdater(nodeSettings)); } diff --git a/server/src/main/java/org/elasticsearch/common/settings/ConsistentSettingsService.java b/server/src/main/java/org/elasticsearch/common/settings/ConsistentSettingsService.java new file mode 100644 index 00000000000..5a557f8b6a6 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/settings/ConsistentSettingsService.java @@ -0,0 +1,257 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.LocalNodeMasterListener; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.hash.MessageDigests; +import org.elasticsearch.threadpool.ThreadPool; + +import java.nio.charset.StandardCharsets; +import java.security.NoSuchAlgorithmException; +import java.security.spec.InvalidKeySpecException; +import java.util.Arrays; +import java.util.Base64; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; + +import javax.crypto.SecretKey; +import javax.crypto.SecretKeyFactory; +import javax.crypto.spec.PBEKeySpec; + +/** + * Used to publish secure setting hashes in the cluster state and to validate those hashes against the local values of those same settings. + * This is colloquially referred to as the secure setting consistency check. It will publish and verify hashes only for the collection + * of settings passed in the constructor. The settings have to have the {@link Setting.Property#Consistent} property. 
+ */ +public final class ConsistentSettingsService { + private static final Logger logger = LogManager.getLogger(ConsistentSettingsService.class); + + private final Settings settings; + private final ClusterService clusterService; + private final Collection> secureSettingsCollection; + private final SecretKeyFactory pbkdf2KeyFactory; + + public ConsistentSettingsService(Settings settings, ClusterService clusterService, + Collection> secureSettingsCollection) { + this.settings = settings; + this.clusterService = clusterService; + this.secureSettingsCollection = secureSettingsCollection; + // this is used to compute the PBKDF2 hash (the published one) + try { + this.pbkdf2KeyFactory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA512"); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException("The \"PBKDF2WithHmacSHA512\" algorithm is required for consistent secure settings' hashes", e); + } + } + + /** + * Returns a {@link LocalNodeMasterListener} that will publish hashes of all the settings passed in the constructor. These hashes are + * published by the master node only. Note that this is not designed for {@link SecureSettings} implementations that are mutable. + */ + public LocalNodeMasterListener newHashPublisher() { + // eagerly compute hashes to be published + final Map computedHashesOfConsistentSettings = computeHashesOfConsistentSecureSettings(); + return new HashesPublisher(computedHashesOfConsistentSettings, clusterService); + } + + /** + * Verifies that the hashes of consistent secure settings in the latest {@code ClusterState} verify for the values of those same + * settings on the local node. The settings to be checked are passed in the constructor. Also, validates that a missing local + * value is also missing in the published set, and vice-versa. + */ + public boolean areAllConsistent() { + final ClusterState state = clusterService.state(); + final Map publishedHashesOfConsistentSettings = state.metaData().hashesOfConsistentSettings(); + final Set publishedSettingKeysToVerify = new HashSet<>(); + publishedSettingKeysToVerify.addAll(publishedHashesOfConsistentSettings.keySet()); + final AtomicBoolean allConsistent = new AtomicBoolean(true); + forEachConcreteSecureSettingDo(concreteSecureSetting -> { + final String publishedSaltAndHash = publishedHashesOfConsistentSettings.get(concreteSecureSetting.getKey()); + final byte[] localHash = concreteSecureSetting.getSecretDigest(settings); + if (publishedSaltAndHash == null && localHash == null) { + // consistency of missing + logger.debug("no published hash for the consistent secure setting [{}] but it also does NOT exist on the local node", + concreteSecureSetting.getKey()); + } else if (publishedSaltAndHash == null && localHash != null) { + // setting missing on master but present locally + logger.warn("no published hash for the consistent secure setting [{}] but it exists on the local node", + concreteSecureSetting.getKey()); + if (state.nodes().isLocalNodeElectedMaster()) { + throw new IllegalStateException("Master node cannot validate consistent setting. 
No published hash for [" + + concreteSecureSetting.getKey() + "] but setting exists."); + } + allConsistent.set(false); + } else if (publishedSaltAndHash != null && localHash == null) { + // setting missing locally but present on master + logger.warn("the consistent secure setting [{}] does not exist on the local node but there is a published hash for it", + concreteSecureSetting.getKey()); + allConsistent.set(false); + } else { + assert publishedSaltAndHash != null; + assert localHash != null; + final String[] parts = publishedSaltAndHash.split(":"); + if (parts == null || parts.length != 2) { + throw new IllegalArgumentException("published hash [" + publishedSaltAndHash + " ] for secure setting [" + + concreteSecureSetting.getKey() + "] is invalid"); + } + final String publishedSalt = parts[0]; + final String publishedHash = parts[1]; + final byte[] computedSaltedHashBytes = computeSaltedPBKDF2Hash(localHash, publishedSalt.getBytes(StandardCharsets.UTF_8)); + final String computedSaltedHash = new String(Base64.getEncoder().encode(computedSaltedHashBytes), StandardCharsets.UTF_8); + if (false == publishedHash.equals(computedSaltedHash)) { + logger.warn("the published hash [{}] of the consistent secure setting [{}] differs from the locally computed one [{}]", + publishedHash, concreteSecureSetting.getKey(), computedSaltedHash); + if (state.nodes().isLocalNodeElectedMaster()) { + throw new IllegalStateException("Master node cannot validate consistent setting. The published hash [" + + publishedHash + "] of the consistent secure setting [" + concreteSecureSetting.getKey() + + "] differs from the locally computed one [" + computedSaltedHash + "]."); + } + allConsistent.set(false); + } + } + publishedSettingKeysToVerify.remove(concreteSecureSetting.getKey()); + }); + // another case of settings missing locally, when group settings have not expanded to all the keys published + for (String publishedSettingKey : publishedSettingKeysToVerify) { + for (Setting setting : secureSettingsCollection) { + if (setting.match(publishedSettingKey)) { + // setting missing locally but present on master + logger.warn("the consistent secure setting [{}] does not exist on the local node but there is a published hash for it", + publishedSettingKey); + allConsistent.set(false); + } + } + } + return allConsistent.get(); + } + + /** + * Iterate over the passed in secure settings, expanding {@link Setting.AffixSetting} to concrete settings, in the scope of the local + * settings. 
+ */ + private void forEachConcreteSecureSettingDo(Consumer> secureSettingConsumer) { + for (Setting setting : secureSettingsCollection) { + assert setting.isConsistent() : "[" + setting.getKey() + "] is not a consistent setting"; + if (setting instanceof Setting.AffixSetting) { + ((Setting.AffixSetting)setting).getAllConcreteSettings(settings).forEach(concreteSetting -> { + assert concreteSetting instanceof SecureSetting : "[" + concreteSetting.getKey() + "] is not a secure setting"; + secureSettingConsumer.accept((SecureSetting)concreteSetting); + }); + } else if (setting instanceof SecureSetting) { + secureSettingConsumer.accept((SecureSetting) setting); + } else { + assert false : "Unrecognized consistent secure setting [" + setting.getKey() + "]"; + } + } + } + + private Map computeHashesOfConsistentSecureSettings() { + final Map hashesBySettingKey = new HashMap<>(); + forEachConcreteSecureSettingDo(concreteSecureSetting -> { + final byte[] localHash = concreteSecureSetting.getSecretDigest(settings); + if (localHash != null) { + final String salt = UUIDs.randomBase64UUID(); + final byte[] publicHash = computeSaltedPBKDF2Hash(localHash, salt.getBytes(StandardCharsets.UTF_8)); + final String encodedPublicHash = new String(Base64.getEncoder().encode(publicHash), StandardCharsets.UTF_8); + hashesBySettingKey.put(concreteSecureSetting.getKey(), salt + ":" + encodedPublicHash); + } + }); + return hashesBySettingKey; + } + + private byte[] computeSaltedPBKDF2Hash(byte[] bytes, byte[] salt) { + final int iterations = 5000; + final int keyLength = 512; + char[] value = null; + try { + value = MessageDigests.toHexCharArray(bytes); + final PBEKeySpec spec = new PBEKeySpec(value, salt, iterations, keyLength); + final SecretKey key = pbkdf2KeyFactory.generateSecret(spec); + return key.getEncoded(); + } catch (InvalidKeySpecException e) { + throw new RuntimeException("Unexpected exception when computing PBKDF2 hash", e); + } finally { + if (value != null) { + Arrays.fill(value, '0'); + } + } + } + + static final class HashesPublisher implements LocalNodeMasterListener { + + // eagerly compute hashes to be published + final Map computedHashesOfConsistentSettings; + final ClusterService clusterService; + + HashesPublisher(Map computedHashesOfConsistentSettings, ClusterService clusterService) { + this.computedHashesOfConsistentSettings = Collections.unmodifiableMap(computedHashesOfConsistentSettings); + this.clusterService = clusterService; + } + + @Override + public void onMaster() { + clusterService.submitStateUpdateTask("publish-secure-settings-hashes", new ClusterStateUpdateTask(Priority.URGENT) { + @Override + public ClusterState execute(ClusterState currentState) { + final Map publishedHashesOfConsistentSettings = currentState.metaData() + .hashesOfConsistentSettings(); + if (computedHashesOfConsistentSettings.equals(publishedHashesOfConsistentSettings)) { + logger.debug("Nothing to publish. 
What is already published matches this node's view."); + return currentState; + } else { + return ClusterState.builder(currentState).metaData(MetaData.builder(currentState.metaData()) + .hashesOfConsistentSettings(computedHashesOfConsistentSettings)).build(); + } + } + + @Override + public void onFailure(String source, Exception e) { + logger.error("unable to publish secure settings hashes", e); + } + + }); + } + + @Override + public void offMaster() { + logger.trace("I am no longer master, nothing to do"); + } + + @Override + public String executorName() { + return ThreadPool.Names.SAME; + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java index 64cdd7165f2..7ad69c1eebe 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java +++ b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java @@ -30,6 +30,7 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.hash.MessageDigests; import javax.crypto.Cipher; import javax.crypto.CipherInputStream; @@ -85,6 +86,17 @@ public class KeyStoreWrapper implements SecureSettings { FILE } + /** An entry in the keystore. The bytes are opaque and interpreted based on the entry type. */ + private static class Entry { + final byte[] bytes; + final byte[] sha256Digest; + + Entry(byte[] bytes) { + this.bytes = bytes; + this.sha256Digest = MessageDigests.sha256().digest(bytes); + } + } + /** * A regex for the valid characters that a setting name in the keystore may use. */ @@ -148,7 +160,7 @@ public class KeyStoreWrapper implements SecureSettings { private final byte[] dataBytes; /** The decrypted secret data. See {@link #decrypt(char[])}. 
*/ - private final SetOnce> entries = new SetOnce<>(); + private final SetOnce> entries = new SetOnce<>(); private volatile boolean closed; private KeyStoreWrapper(int formatVersion, boolean hasPassword, byte[] dataBytes) { @@ -350,7 +362,7 @@ public class KeyStoreWrapper implements SecureSettings { int entrySize = input.readInt(); byte[] entryBytes = new byte[entrySize]; input.readFully(entryBytes); - entries.get().put(setting, entryBytes); + entries.get().put(setting, new Entry(entryBytes)); } if (input.read() != -1) { throw new SecurityException("Keystore has been corrupted or tampered with"); @@ -369,11 +381,11 @@ public class KeyStoreWrapper implements SecureSettings { try (CipherOutputStream cipherStream = new CipherOutputStream(bytes, cipher); DataOutputStream output = new DataOutputStream(cipherStream)) { output.writeInt(entries.get().size()); - for (Map.Entry mapEntry : entries.get().entrySet()) { + for (Map.Entry mapEntry : entries.get().entrySet()) { output.writeUTF(mapEntry.getKey()); - byte[] entry = mapEntry.getValue(); - output.writeInt(entry.length); - output.write(entry); + byte[] entryBytes = mapEntry.getValue().bytes; + output.writeInt(entryBytes.length); + output.write(entryBytes); } } return bytes.toByteArray(); @@ -448,7 +460,7 @@ public class KeyStoreWrapper implements SecureSettings { } Arrays.fill(chars, '\0'); - entries.get().put(setting, bytes); + entries.get().put(setting, new Entry(bytes)); } } @@ -521,8 +533,8 @@ public class KeyStoreWrapper implements SecureSettings { @Override public synchronized SecureString getString(String setting) { ensureOpen(); - byte[] entry = entries.get().get(setting); - ByteBuffer byteBuffer = ByteBuffer.wrap(entry); + Entry entry = entries.get().get(setting); + ByteBuffer byteBuffer = ByteBuffer.wrap(entry.bytes); CharBuffer charBuffer = StandardCharsets.UTF_8.decode(byteBuffer); return new SecureString(Arrays.copyOfRange(charBuffer.array(), charBuffer.position(), charBuffer.limit())); } @@ -530,8 +542,19 @@ public class KeyStoreWrapper implements SecureSettings { @Override public synchronized InputStream getFile(String setting) { ensureOpen(); - byte[] entry = entries.get().get(setting); - return new ByteArrayInputStream(entry); + Entry entry = entries.get().get(setting); + return new ByteArrayInputStream(entry.bytes); + } + + /** + * Returns the SHA256 digest for the setting's value, even after {@code #close()} has been called. The setting must exist. The digest is + * used to check for value changes without actually storing the value. 
+ */ + @Override + public byte[] getSHA256Digest(String setting) { + assert entries.get() != null : "Keystore is not loaded"; + Entry entry = entries.get().get(setting); + return entry.sha256Digest; } /** @@ -553,9 +576,9 @@ public class KeyStoreWrapper implements SecureSettings { ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(CharBuffer.wrap(value)); byte[] bytes = Arrays.copyOfRange(byteBuffer.array(), byteBuffer.position(), byteBuffer.limit()); - byte[] oldEntry = entries.get().put(setting, bytes); + Entry oldEntry = entries.get().put(setting, new Entry(bytes)); if (oldEntry != null) { - Arrays.fill(oldEntry, (byte)0); + Arrays.fill(oldEntry.bytes, (byte)0); } } @@ -564,18 +587,18 @@ public class KeyStoreWrapper implements SecureSettings { ensureOpen(); validateSettingName(setting); - byte[] oldEntry = entries.get().put(setting, Arrays.copyOf(bytes, bytes.length)); + Entry oldEntry = entries.get().put(setting, new Entry(Arrays.copyOf(bytes, bytes.length))); if (oldEntry != null) { - Arrays.fill(oldEntry, (byte)0); + Arrays.fill(oldEntry.bytes, (byte)0); } } /** Remove the given setting from the keystore. */ void remove(String setting) { ensureOpen(); - byte[] oldEntry = entries.get().remove(setting); + Entry oldEntry = entries.get().remove(setting); if (oldEntry != null) { - Arrays.fill(oldEntry, (byte)0); + Arrays.fill(oldEntry.bytes, (byte)0); } } @@ -590,8 +613,8 @@ public class KeyStoreWrapper implements SecureSettings { public synchronized void close() { this.closed = true; if (null != entries.get() && entries.get().isEmpty() == false) { - for (byte[] entry : entries.get().values()) { - Arrays.fill(entry, (byte) 0); + for (Entry entry : entries.get().values()) { + Arrays.fill(entry.bytes, (byte) 0); } } } diff --git a/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java b/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java index 33f4718aa45..e022e4e3760 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java @@ -37,7 +37,7 @@ public abstract class SecureSetting extends Setting { /** Determines whether legacy settings with sensitive values should be allowed. */ private static final boolean ALLOW_INSECURE_SETTINGS = Booleans.parseBoolean(System.getProperty("es.allow_insecure_settings", "false")); - private static final Set ALLOWED_PROPERTIES = EnumSet.of(Property.Deprecated); + private static final Set ALLOWED_PROPERTIES = EnumSet.of(Property.Deprecated, Property.Consistent); private static final Property[] FIXED_PROPERTIES = { Property.NodeScope @@ -97,6 +97,23 @@ public abstract class SecureSetting extends Setting { } } + /** + * Returns the digest of this secure setting's value or {@code null} if the setting is missing (inside the keystore). This method can be + * called even after the {@code SecureSettings} have been closed, unlike {@code #get(Settings)}. The digest is used to check for changes + * of the value (by re-reading the {@code SecureSettings}), without actually transmitting the value to compare with. 
+ */ + public byte[] getSecretDigest(Settings settings) { + final SecureSettings secureSettings = settings.getSecureSettings(); + if (secureSettings == null || false == secureSettings.getSettingNames().contains(getKey())) { + return null; + } + try { + return secureSettings.getSHA256Digest(getKey()); + } catch (GeneralSecurityException e) { + throw new RuntimeException("failed to read secure setting " + getKey(), e); + } + } + /** Returns the secret setting from the keyStoreReader store. */ abstract T getSecret(SecureSettings secureSettings) throws GeneralSecurityException; diff --git a/server/src/main/java/org/elasticsearch/common/settings/SecureSettings.java b/server/src/main/java/org/elasticsearch/common/settings/SecureSettings.java index 98f980c1ec6..7f92b382dd7 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SecureSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SecureSettings.java @@ -42,6 +42,8 @@ public interface SecureSettings extends Closeable { /** Return a file setting. The {@link InputStream} should be closed once it is used. */ InputStream getFile(String setting) throws GeneralSecurityException; + byte[] getSHA256Digest(String setting) throws GeneralSecurityException; + @Override void close() throws IOException; } diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index 11a24052266..396492d8f8f 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -113,6 +113,11 @@ public class Setting implements ToXContentObject { */ NodeScope, + /** + * Secure setting values equal on all nodes + */ + Consistent, + /** * Index scope */ @@ -168,6 +173,7 @@ public class Setting implements ToXContentObject { checkPropertyRequiresIndexScope(propertiesAsSet, Property.NotCopyableOnResize); checkPropertyRequiresIndexScope(propertiesAsSet, Property.InternalIndex); checkPropertyRequiresIndexScope(propertiesAsSet, Property.PrivateIndex); + checkPropertyRequiresNodeScope(propertiesAsSet, Property.Consistent); this.properties = propertiesAsSet; } } @@ -178,6 +184,12 @@ public class Setting implements ToXContentObject { } } + private void checkPropertyRequiresNodeScope(final EnumSet properties, final Property property) { + if (properties.contains(property) && properties.contains(Property.NodeScope) == false) { + throw new IllegalArgumentException("non-node-scoped setting [" + key + "] can not have property [" + property + "]"); + } + } + /** * Creates a new Setting instance * @param key the settings key for this setting. @@ -322,6 +334,14 @@ public class Setting implements ToXContentObject { return properties.contains(Property.NodeScope); } + /** + * Returns true if this setting's value can be checked for equality across all nodes. Only {@link SecureSetting} instances + * may have this qualifier. 
+ */ + public boolean isConsistent() { + return properties.contains(Property.Consistent); + } + /** * Returns true if this setting has an index scope, otherwise false */ diff --git a/server/src/main/java/org/elasticsearch/common/settings/Settings.java b/server/src/main/java/org/elasticsearch/common/settings/Settings.java index 72f9406edac..b798b922190 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -1350,15 +1350,20 @@ public final class Settings implements ToXContentFragment { } @Override - public SecureString getString(String setting) throws GeneralSecurityException{ + public SecureString getString(String setting) throws GeneralSecurityException { return delegate.getString(addPrefix.apply(setting)); } @Override - public InputStream getFile(String setting) throws GeneralSecurityException{ + public InputStream getFile(String setting) throws GeneralSecurityException { return delegate.getFile(addPrefix.apply(setting)); } + @Override + public byte[] getSHA256Digest(String setting) throws GeneralSecurityException { + return delegate.getSHA256Digest(addPrefix.apply(setting)); + } + @Override public void close() throws IOException { delegate.close(); diff --git a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index 6a78e81d7f3..58c9cbc5204 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -49,6 +49,7 @@ public class SettingsModule implements Module { private final Set settingsFilterPattern = new HashSet<>(); private final Map> nodeSettings = new HashMap<>(); private final Map> indexSettings = new HashMap<>(); + private final Set> consistentSettings = new HashSet<>(); private final IndexScopedSettings indexScopedSettings; private final ClusterSettings clusterSettings; private final SettingsFilter settingsFilter; @@ -157,7 +158,6 @@ public class SettingsModule implements Module { binder.bind(IndexScopedSettings.class).toInstance(indexScopedSettings); } - /** * Registers a new setting. This method should be used by plugins in order to expose any custom settings the plugin defines. * Unless a setting is registered the setting is unusable. 
If a setting is never the less specified the node will reject @@ -175,6 +175,19 @@ public class SettingsModule implements Module { if (existingSetting != null) { throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice"); } + if (setting.isConsistent()) { + if (setting instanceof Setting.AffixSetting) { + if (((Setting.AffixSetting)setting).getConcreteSettingForNamespace("_na_") instanceof SecureSetting) { + consistentSettings.add(setting); + } else { + throw new IllegalArgumentException("Invalid consistent secure setting [" + setting.getKey() + "]"); + } + } else if (setting instanceof SecureSetting) { + consistentSettings.add(setting); + } else { + throw new IllegalArgumentException("Invalid consistent secure setting [" + setting.getKey() + "]"); + } + } nodeSettings.put(setting.getKey(), setting); } if (setting.hasIndexScope()) { @@ -182,6 +195,9 @@ public class SettingsModule implements Module { if (existingSetting != null) { throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice"); } + if (setting.isConsistent()) { + throw new IllegalStateException("Consistent setting [" + setting.getKey() + "] cannot be index scoped"); + } indexSettings.put(setting.getKey(), setting); } } else { @@ -215,6 +231,10 @@ public class SettingsModule implements Module { return clusterSettings; } + public Set> getConsistentSettings() { + return consistentSettings; + } + public SettingsFilter getSettingsFilter() { return settingsFilter; } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index d0ce6a7786c..0f9c45fd1a7 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -75,6 +75,7 @@ import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.ConsistentSettingsService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.SettingUpgrader; @@ -362,6 +363,9 @@ public class Node implements Closeable { final ClusterService clusterService = new ClusterService(settings, settingsModule.getClusterSettings(), threadPool); clusterService.addStateApplier(scriptModule.getScriptService()); resourcesToClose.add(clusterService); + clusterService.addLocalNodeMasterListener( + new ConsistentSettingsService(settings, clusterService, settingsModule.getConsistentSettings()) + .newHashPublisher()); final IngestService ingestService = new IngestService(clusterService, threadPool, this.environment, scriptModule.getScriptService(), analysisModule.getAnalysisRegistry(), pluginsService.filterPlugins(IngestPlugin.class)); final DiskThresholdMonitor listener = new DiskThresholdMonitor(settings, clusterService::state, diff --git a/server/src/test/java/org/elasticsearch/common/settings/ConsistentSettingsIT.java b/server/src/test/java/org/elasticsearch/common/settings/ConsistentSettingsIT.java new file mode 100644 index 00000000000..fcb52a3719a --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/settings/ConsistentSettingsIT.java @@ -0,0 +1,189 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Setting.AffixSetting; +import org.elasticsearch.env.Environment; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) +public class ConsistentSettingsIT extends ESIntegTestCase { + + static final Setting DUMMY_STRING_CONSISTENT_SETTING = SecureSetting + .secureString("dummy.consistent.secure.string.setting", null, Setting.Property.Consistent); + static final AffixSetting DUMMY_AFFIX_STRING_CONSISTENT_SETTING = Setting.affixKeySetting( + "dummy.consistent.secure.string.affix.setting.", "suffix", + key -> SecureSetting.secureString(key, null, Setting.Property.Consistent)); + private final AtomicReference> nodeSettingsOverride = new AtomicReference<>(null); + + public void testAllConsistentOnAllNodesSuccess() throws Exception { + for (String nodeName : internalCluster().getNodeNames()) { + Environment environment = internalCluster().getInstance(Environment.class, nodeName); + ClusterService clusterService = internalCluster().getInstance(ClusterService.class, nodeName); + assertTrue("Empty settings list always consistent.", + new ConsistentSettingsService(environment.settings(), clusterService, Collections.emptyList()).areAllConsistent()); + assertTrue( + "Simple consistent secure setting is consistent [" + clusterService.state().metaData().hashesOfConsistentSettings() + + "].", + new ConsistentSettingsService(environment.settings(), clusterService, + Collections.singletonList(DUMMY_STRING_CONSISTENT_SETTING)).areAllConsistent()); + assertTrue( + "Affix consistent secure setting is consistent [" + clusterService.state().metaData().hashesOfConsistentSettings() + + "].", + new ConsistentSettingsService(environment.settings(), clusterService, + Collections.singletonList(DUMMY_AFFIX_STRING_CONSISTENT_SETTING)).areAllConsistent()); + assertTrue("All secure settings are consistent [" + clusterService.state().metaData().hashesOfConsistentSettings() + "].", + new ConsistentSettingsService(environment.settings(), clusterService, + Arrays.asList(DUMMY_STRING_CONSISTENT_SETTING, DUMMY_AFFIX_STRING_CONSISTENT_SETTING)).areAllConsistent()); + } + } + + public void testConsistencyFailures() throws Exception { + nodeSettingsOverride.set(nodeOrdinal -> { + Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)); + MockSecureSettings secureSettings = new MockSecureSettings(); + if (randomBoolean()) { + // 
different value + secureSettings.setString("dummy.consistent.secure.string.setting", "DIFFERENT_VALUE"); + } else { + // missing value + // secureSettings.setString("dummy.consistent.secure.string.setting", "string_value"); + } + secureSettings.setString("dummy.consistent.secure.string.affix.setting." + "affix1" + ".suffix", "affix_value_1"); + secureSettings.setString("dummy.consistent.secure.string.affix.setting." + "affix2" + ".suffix", "affix_value_2"); + assert builder.getSecureSettings() == null : "Deal with the settings merge"; + builder.setSecureSettings(secureSettings); + return builder.build(); + }); + String newNodeName = internalCluster().startNode(); + Environment environment = internalCluster().getInstance(Environment.class, newNodeName); + ClusterService clusterService = internalCluster().getInstance(ClusterService.class, newNodeName); + assertTrue("Empty settings list always consistent.", + new ConsistentSettingsService(environment.settings(), clusterService, Collections.emptyList()).areAllConsistent()); + assertFalse( + "Simple consistent secure setting is NOT consistent [" + clusterService.state().metaData().hashesOfConsistentSettings() + + "].", + new ConsistentSettingsService(environment.settings(), clusterService, + Collections.singletonList(DUMMY_STRING_CONSISTENT_SETTING)).areAllConsistent()); + assertTrue( + "Affix consistent secure setting is consistent [" + clusterService.state().metaData().hashesOfConsistentSettings() + + "].", + new ConsistentSettingsService(environment.settings(), clusterService, + Collections.singletonList(DUMMY_AFFIX_STRING_CONSISTENT_SETTING)).areAllConsistent()); + assertFalse("All secure settings are NOT consistent [" + clusterService.state().metaData().hashesOfConsistentSettings() + "].", + new ConsistentSettingsService(environment.settings(), clusterService, + Arrays.asList(DUMMY_STRING_CONSISTENT_SETTING, DUMMY_AFFIX_STRING_CONSISTENT_SETTING)).areAllConsistent()); + nodeSettingsOverride.set(nodeOrdinal -> { + Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)); + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("dummy.consistent.secure.string.setting", "string_value"); + if (randomBoolean()) { + secureSettings.setString("dummy.consistent.secure.string.affix.setting." + "affix1" + ".suffix", "affix_value_1"); + if (randomBoolean()) { + secureSettings.setString("dummy.consistent.secure.string.affix.setting." + "affix2" + ".suffix", "DIFFERENT_VALUE"); + } else { + // missing value + // "dummy.consistent.secure.string.affix.setting.affix2.suffix" + } + } else { + if (randomBoolean()) { + secureSettings.setString("dummy.consistent.secure.string.affix.setting." + "affix1" + ".suffix", "DIFFERENT_VALUE_1"); + secureSettings.setString("dummy.consistent.secure.string.affix.setting." 
+ "affix2" + ".suffix", "DIFFERENT_VALUE_2"); + } else { + // missing values + // dummy.consistent.secure.string.affix.setting.affix1.suffix + // dummy.consistent.secure.string.affix.setting.affix2.suffix + } + } + assert builder.getSecureSettings() == null : "Deal with the settings merge"; + builder.setSecureSettings(secureSettings); + return builder.build(); + }); + newNodeName = internalCluster().startNode(); + environment = internalCluster().getInstance(Environment.class, newNodeName); + clusterService = internalCluster().getInstance(ClusterService.class, newNodeName); + assertTrue("Empty settings list always consistent.", + new ConsistentSettingsService(environment.settings(), clusterService, Collections.emptyList()).areAllConsistent()); + assertTrue( + "Simple consistent secure setting is consistent [" + clusterService.state().metaData().hashesOfConsistentSettings() + + "].", + new ConsistentSettingsService(environment.settings(), clusterService, + Collections.singletonList(DUMMY_STRING_CONSISTENT_SETTING)).areAllConsistent()); + assertFalse( + "Affix consistent secure setting is NOT consistent [" + clusterService.state().metaData().hashesOfConsistentSettings() + + "].", + new ConsistentSettingsService(environment.settings(), clusterService, + Collections.singletonList(DUMMY_AFFIX_STRING_CONSISTENT_SETTING)).areAllConsistent()); + assertFalse("All secure settings are NOT consistent [" + clusterService.state().metaData().hashesOfConsistentSettings() + "].", + new ConsistentSettingsService(environment.settings(), clusterService, + Arrays.asList(DUMMY_STRING_CONSISTENT_SETTING, DUMMY_AFFIX_STRING_CONSISTENT_SETTING)).areAllConsistent()); + nodeSettingsOverride.set(null); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Function nodeSettingsOverrideFunction = nodeSettingsOverride.get(); + if (nodeSettingsOverrideFunction != null) { + final Settings overrideSettings = nodeSettingsOverrideFunction.apply(nodeOrdinal); + if (overrideSettings != null) { + return overrideSettings; + } + } + Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)); + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("dummy.consistent.secure.string.setting", "string_value"); + secureSettings.setString("dummy.consistent.secure.string.affix.setting." + "affix1" + ".suffix", "affix_value_1"); + secureSettings.setString("dummy.consistent.secure.string.affix.setting." 
+ "affix2" + ".suffix", "affix_value_2"); + assert builder.getSecureSettings() == null : "Deal with the settings merge"; + builder.setSecureSettings(secureSettings); + return builder.build(); + } + + @Override + protected Collection> nodePlugins() { + Collection> classes = new ArrayList<>(super.nodePlugins()); + classes.add(DummyPlugin.class); + return classes; + } + + public static final class DummyPlugin extends Plugin { + + public DummyPlugin() { + } + + @Override + public List> getSettings() { + List> settings = new ArrayList<>(super.getSettings()); + settings.add(DUMMY_STRING_CONSISTENT_SETTING); + settings.add(DUMMY_AFFIX_STRING_CONSISTENT_SETTING); + return settings; + } + } +} diff --git a/server/src/test/java/org/elasticsearch/common/settings/ConsistentSettingsServiceTests.java b/server/src/test/java/org/elasticsearch/common/settings/ConsistentSettingsServiceTests.java new file mode 100644 index 00000000000..cd3cebe8b20 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/settings/ConsistentSettingsServiceTests.java @@ -0,0 +1,167 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.settings; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.mock.orig.Mockito; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; +import org.mockito.stubbing.Answer; + +import java.util.Arrays; +import java.util.Locale; +import java.util.concurrent.atomic.AtomicReference; + +import static org.mockito.Mockito.mock; +import static org.hamcrest.Matchers.is; + +public class ConsistentSettingsServiceTests extends ESTestCase { + + private AtomicReference clusterState = new AtomicReference<>(); + private ClusterService clusterService; + + @Before + public void init() throws Exception { + clusterState.set(ClusterState.EMPTY_STATE); + clusterService = mock(ClusterService.class); + Mockito.doAnswer((Answer) invocation -> { + return clusterState.get(); + }).when(clusterService).state(); + Mockito.doAnswer((Answer) invocation -> { + final ClusterStateUpdateTask arg0 = (ClusterStateUpdateTask) invocation.getArguments()[1]; + this.clusterState.set(arg0.execute(this.clusterState.get())); + return null; + }).when(clusterService).submitStateUpdateTask(Mockito.isA(String.class), Mockito.isA(ClusterStateUpdateTask.class)); + } + + public void testSingleStringSetting() throws Exception { + Setting stringSetting = SecureSetting.secureString("test.simple.foo", null, Setting.Property.Consistent); + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(stringSetting.getKey(), "somethingsecure"); + secureSettings.setString("test.noise.setting", "noise"); + Settings.Builder builder = Settings.builder(); + builder.setSecureSettings(secureSettings); + Settings settings = builder.build(); + // hashes not yet published + assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(false)); + // publish + new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).newHashPublisher().onMaster(); + ConsistentSettingsService consistentService = new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)); + assertThat(consistentService.areAllConsistent(), is(true)); + // change value + secureSettings.setString(stringSetting.getKey(), "_TYPO_somethingsecure"); + assertThat(consistentService.areAllConsistent(), is(false)); + assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(false)); + // publish change + new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).newHashPublisher().onMaster(); + assertThat(consistentService.areAllConsistent(), is(true)); + assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(true)); + } + + public void testSingleAffixSetting() throws Exception { + Setting.AffixSetting affixStringSetting = Setting.affixKeySetting("test.affix.", "bar", + (key) -> SecureSetting.secureString(key, null, Setting.Property.Consistent)); + // add two affix settings to the keystore + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("test.noise.setting", "noise"); + secureSettings.setString("test.affix.first.bar", "first_secure"); + secureSettings.setString("test.affix.second.bar", "second_secure"); + Settings.Builder builder = Settings.builder(); + 
builder.setSecureSettings(secureSettings); + Settings settings = builder.build(); + // hashes not yet published + assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), + is(false)); + // publish + new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onMaster(); + ConsistentSettingsService consistentService = new ConsistentSettingsService(settings, clusterService, + Arrays.asList(affixStringSetting)); + assertThat(consistentService.areAllConsistent(), is(true)); + // change value + secureSettings.setString("test.affix.second.bar", "_TYPO_second_secure"); + assertThat(consistentService.areAllConsistent(), is(false)); + assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), + is(false)); + // publish change + new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onMaster(); + assertThat(consistentService.areAllConsistent(), is(true)); + assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), is(true)); + // add value + secureSettings.setString("test.affix.third.bar", "third_secure"); + builder = Settings.builder(); + builder.setSecureSettings(secureSettings); + settings = builder.build(); + assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), + is(false)); + // publish + new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onMaster(); + assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), + is(true)); + // remove value + secureSettings = new MockSecureSettings(); + secureSettings.setString("test.another.noise.setting", "noise"); + // missing value test.affix.first.bar + secureSettings.setString("test.affix.second.bar", "second_secure"); + secureSettings.setString("test.affix.third.bar", "third_secure"); + builder = Settings.builder(); + builder.setSecureSettings(secureSettings); + settings = builder.build(); + assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), + is(false)); + } + + public void testStringAndAffixSettings() throws Exception { + Setting stringSetting = SecureSetting.secureString("mock.simple.foo", null, Setting.Property.Consistent); + Setting.AffixSetting affixStringSetting = Setting.affixKeySetting("mock.affix.", "bar", + (key) -> SecureSetting.secureString(key, null, Setting.Property.Consistent)); + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(randomAlphaOfLength(8).toLowerCase(Locale.ROOT), "noise"); + secureSettings.setString(stringSetting.getKey(), "somethingsecure"); + secureSettings.setString("mock.affix.foo.bar", "another_secure"); + Settings.Builder builder = Settings.builder(); + builder.setSecureSettings(secureSettings); + Settings settings = builder.build(); + // hashes not yet published + assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting, affixStringSetting)) + .areAllConsistent(), is(false)); + // publish only the simple string setting + new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).newHashPublisher().onMaster(); + assertThat(new ConsistentSettingsService(settings, 
clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(true)); + assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), + is(false)); + assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting, affixStringSetting)) + .areAllConsistent(), is(false)); + // publish only the affix string setting + new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onMaster(); + assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(false)); + assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), is(true)); + assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting, affixStringSetting)) + .areAllConsistent(), is(false)); + // publish both settings + new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting, affixStringSetting)).newHashPublisher() + .onMaster(); + assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(true)); + assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), is(true)); + assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting, affixStringSetting)) + .areAllConsistent(), is(true)); + } +} diff --git a/server/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java b/server/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java index 68f434b1796..7df1b2d6f75 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java @@ -51,12 +51,14 @@ import java.nio.file.Files; import java.nio.file.Path; import java.security.GeneralSecurityException; import java.security.KeyStore; +import java.security.MessageDigest; import java.security.SecureRandom; import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; import java.util.HashSet; import java.util.List; +import java.util.Locale; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -127,6 +129,27 @@ public class KeyStoreWrapperTests extends ESTestCase { assertThat(exception.getMessage(), containsString("closed")); } + public void testValueSHA256Digest() throws Exception { + final KeyStoreWrapper keystore = KeyStoreWrapper.create(); + final String stringSettingKeyName = randomAlphaOfLength(5).toLowerCase(Locale.ROOT) + "1"; + final String stringSettingValue = randomAlphaOfLength(32); + keystore.setString(stringSettingKeyName, stringSettingValue.toCharArray()); + final String fileSettingKeyName = randomAlphaOfLength(5).toLowerCase(Locale.ROOT) + "2"; + final byte[] fileSettingValue = randomByteArrayOfLength(32); + keystore.setFile(fileSettingKeyName, fileSettingValue); + + final byte[] stringSettingHash = MessageDigest.getInstance("SHA-256").digest(stringSettingValue.getBytes(StandardCharsets.UTF_8)); + assertThat(keystore.getSHA256Digest(stringSettingKeyName), equalTo(stringSettingHash)); + final byte[] fileSettingHash = MessageDigest.getInstance("SHA-256").digest(fileSettingValue); + assertThat(keystore.getSHA256Digest(fileSettingKeyName), equalTo(fileSettingHash)); + + keystore.close(); + + // 
value hashes accessible even when the keystore is closed + assertThat(keystore.getSHA256Digest(stringSettingKeyName), equalTo(stringSettingHash)); + assertThat(keystore.getSHA256Digest(fileSettingKeyName), equalTo(fileSettingHash)); + } + public void testUpgradeNoop() throws Exception { KeyStoreWrapper keystore = KeyStoreWrapper.create(); SecureString seed = keystore.getString(KeyStoreWrapper.SEED_SETTING.getKey()); diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java index c6182eac8f6..c374984eb5d 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java @@ -21,11 +21,13 @@ package org.elasticsearch.common.settings; import org.elasticsearch.common.inject.ModuleTestCase; import org.elasticsearch.common.settings.Setting.Property; +import org.hamcrest.Matchers; import java.util.Arrays; import static java.util.Collections.emptySet; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; public class SettingsModuleTests extends ModuleTestCase { @@ -85,6 +87,40 @@ public class SettingsModuleTests extends ModuleTestCase { } } + public void testRegisterConsistentSettings() { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("some.custom.secure.consistent.setting", "secure_value"); + final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + final Setting concreteConsistentSetting = SecureSetting.secureString("some.custom.secure.consistent.setting", null, + Setting.Property.Consistent); + SettingsModule module = new SettingsModule(settings, concreteConsistentSetting); + assertInstanceBinding(module, Settings.class, (s) -> s == settings); + assertThat(module.getConsistentSettings(), Matchers.containsInAnyOrder(concreteConsistentSetting)); + + final Setting concreteUnsecureConsistentSetting = Setting.simpleString("some.custom.UNSECURE.consistent.setting", + Property.Consistent, Property.NodeScope); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new SettingsModule(Settings.builder().build(), concreteUnsecureConsistentSetting)); + assertThat(e.getMessage(), is("Invalid consistent secure setting [some.custom.UNSECURE.consistent.setting]")); + + secureSettings = new MockSecureSettings(); + secureSettings.setString("some.custom.secure.consistent.afix.wow.setting", "secure_value"); + final Settings settings2 = Settings.builder().setSecureSettings(secureSettings).build(); + final Setting afixConcreteConsistentSetting = Setting.affixKeySetting( + "some.custom.secure.consistent.afix.", "setting", + key -> SecureSetting.secureString(key, null, Setting.Property.Consistent)); + module = new SettingsModule(settings2,afixConcreteConsistentSetting); + assertInstanceBinding(module, Settings.class, (s) -> s == settings2); + assertThat(module.getConsistentSettings(), Matchers.containsInAnyOrder(afixConcreteConsistentSetting)); + + final Setting concreteUnsecureConsistentAfixSetting = Setting.affixKeySetting( + "some.custom.secure.consistent.afix.", "setting", + key -> Setting.simpleString(key, Setting.Property.Consistent, Property.NodeScope)); + e = expectThrows(IllegalArgumentException.class, + () -> new SettingsModule(Settings.builder().build(), concreteUnsecureConsistentAfixSetting)); + assertThat(e.getMessage(), is("Invalid 
consistent secure setting [some.custom.secure.consistent.afix.*.setting]")); + } + public void testLoggerSettings() { { Settings settings = Settings.builder().put("logger._root", "TRACE").put("logger.transport", "INFO").build(); diff --git a/test/framework/src/main/java/org/elasticsearch/common/settings/MockSecureSettings.java b/test/framework/src/main/java/org/elasticsearch/common/settings/MockSecureSettings.java index 3a6161a9f7f..84689cf223d 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/settings/MockSecureSettings.java +++ b/test/framework/src/main/java/org/elasticsearch/common/settings/MockSecureSettings.java @@ -19,9 +19,12 @@ package org.elasticsearch.common.settings; +import org.elasticsearch.common.hash.MessageDigests; + import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -35,6 +38,7 @@ public class MockSecureSettings implements SecureSettings { private Map secureStrings = new HashMap<>(); private Map files = new HashMap<>(); + private Map sha256Digests = new HashMap<>(); private Set settingNames = new HashSet<>(); private final AtomicBoolean closed = new AtomicBoolean(false); @@ -44,6 +48,7 @@ public class MockSecureSettings implements SecureSettings { private MockSecureSettings(MockSecureSettings source) { secureStrings.putAll(source.secureStrings); files.putAll(source.files); + sha256Digests.putAll(source.sha256Digests); settingNames.addAll(source.settingNames); } @@ -69,15 +74,22 @@ public class MockSecureSettings implements SecureSettings { return new ByteArrayInputStream(files.get(setting)); } + @Override + public byte[] getSHA256Digest(String setting) { + return sha256Digests.get(setting); + } + public void setString(String setting, String value) { ensureOpen(); secureStrings.put(setting, new SecureString(value.toCharArray())); + sha256Digests.put(setting, MessageDigests.sha256().digest(value.getBytes(StandardCharsets.UTF_8))); settingNames.add(setting); } public void setFile(String setting, byte[] value) { ensureOpen(); files.put(setting, value); + sha256Digests.put(setting, MessageDigests.sha256().digest(value)); settingNames.add(setting); } @@ -90,6 +102,7 @@ public class MockSecureSettings implements SecureSettings { } settingNames.addAll(secureSettings.settingNames); secureStrings.putAll(secureSettings.secureStrings); + sha256Digests.putAll(secureSettings.sha256Digests); files.putAll(secureSettings.files); } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/NotificationService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/NotificationService.java index c2a079e519f..c6c041a6571 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/NotificationService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/NotificationService.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.watcher.notification; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.SecureString; @@ -179,12 +180,13 @@ public abstract class NotificationService { // get the secure settings out 
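+ // each value is cached below together with its SHA-256 digest (as a Tuple),
+ // so this detached copy can keep answering getSHA256Digest(...) for
+ // consistency checks after the underlying keystore has been closed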
final SecureSettings sourceSecureSettings = Settings.builder().put(source, true).getSecureSettings(); // filter and cache them... - final Map cache = new HashMap<>(); + final Map> cache = new HashMap<>(); if (sourceSecureSettings != null && securePluginSettings != null) { for (final String settingKey : sourceSecureSettings.getSettingNames()) { for (final Setting secureSetting : securePluginSettings) { if (secureSetting.match(settingKey)) { - cache.put(settingKey, sourceSecureSettings.getString(settingKey)); + cache.put(settingKey, + new Tuple<>(sourceSecureSettings.getString(settingKey), sourceSecureSettings.getSHA256Digest(settingKey))); } } } @@ -197,8 +199,8 @@ public abstract class NotificationService { } @Override - public SecureString getString(String setting) throws GeneralSecurityException { - return cache.get(setting); + public SecureString getString(String setting) { + return cache.get(setting).v1(); } @Override @@ -207,10 +209,15 @@ public abstract class NotificationService { } @Override - public InputStream getFile(String setting) throws GeneralSecurityException { + public InputStream getFile(String setting) { throw new IllegalStateException("A NotificationService setting cannot be File."); } + @Override + public byte[] getSHA256Digest(String setting) { + return cache.get(setting).v2(); + } + @Override public void close() throws IOException { } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/NotificationServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/NotificationServiceTests.java index efbefdd6408..0fa05e900e5 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/NotificationServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/NotificationServiceTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.watcher.notification; +import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.SecureString; @@ -16,6 +17,7 @@ import org.elasticsearch.xpack.watcher.notification.NotificationService; import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; import java.security.GeneralSecurityException; import java.util.Arrays; import java.util.Collections; @@ -247,6 +249,11 @@ public class NotificationServiceTests extends ESTestCase { return null; } + @Override + public byte[] getSHA256Digest(String setting) throws GeneralSecurityException { + return MessageDigests.sha256().digest(new String(secureSettingsMap.get(setting)).getBytes(StandardCharsets.UTF_8)); + } + @Override public void close() throws IOException { } From 55b3ec8d7b5020770be9ee02545933313c174680 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sat, 29 Jun 2019 18:26:08 -0400 Subject: [PATCH 40/42] Make peer recovery clean files step async (#43787) Relates #36195 --- .../recovery/PeerRecoveryTargetService.java | 8 +- .../recovery/RecoverySourceHandler.java | 194 +++++++++--------- .../indices/recovery/RecoveryTarget.java | 100 ++++----- .../recovery/RecoveryTargetHandler.java | 3 +- .../recovery/RemoteRecoveryTargetHandler.java | 6 +- .../IndexLevelReplicationTests.java | 17 +- .../RecoveryDuringReplicationTests.java | 5 +- .../PeerRecoveryTargetServiceTests.java | 6 +- .../recovery/RecoverySourceHandlerTests.java | 8 +- 
.../indices/recovery/RecoveryTests.java | 6 +- .../indices/recovery/AsyncRecoveryTarget.java | 6 +- 11 files changed, 184 insertions(+), 175 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 6b1a893667f..50abeb2fb7a 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -544,10 +544,10 @@ public class PeerRecoveryTargetService implements IndexEventListener { @Override public void messageReceived(RecoveryCleanFilesRequest request, TransportChannel channel, Task task) throws Exception { - try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() - )) { - recoveryRef.target().cleanFiles(request.totalTranslogOps(), request.getGlobalCheckpoint(), request.sourceMetaSnapshot()); - channel.sendResponse(TransportResponse.Empty.INSTANCE); + try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) { + final ActionListener listener = new ChannelActionListener<>(channel, Actions.CLEAN_FILES, request); + recoveryRef.target().cleanFiles(request.totalTranslogOps(), request.getGlobalCheckpoint(), request.sourceMetaSnapshot(), + ActionListener.map(listener, nullVal -> TransportResponse.Empty.INSTANCE)); } } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index f3e10c13c21..959c13ccb89 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.StopWatch; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.unit.ByteSizeValue; @@ -75,7 +76,7 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; -import java.util.function.Supplier; +import java.util.function.IntSupplier; import java.util.stream.StreamSupport; import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; @@ -160,15 +161,21 @@ public class RecoverySourceHandler { final long startingSeqNo; final boolean isSequenceNumberBasedRecovery = request.startingSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && isTargetSameHistory() && shard.hasCompleteHistoryOperations("peer-recovery", request.startingSeqNo()); - final SendFileResult sendFileResult; + + final StepListener sendFileStep = new StepListener<>(); + final StepListener prepareEngineStep = new StepListener<>(); + final StepListener sendSnapshotStep = new StepListener<>(); + final StepListener finalizeStep = new StepListener<>(); + if (isSequenceNumberBasedRecovery) { logger.trace("performing sequence numbers based recovery. 
starting at [{}]", request.startingSeqNo()); startingSeqNo = request.startingSeqNo(); - sendFileResult = SendFileResult.EMPTY; + sendFileStep.onResponse(SendFileResult.EMPTY); } else { - final Engine.IndexCommitRef phase1Snapshot; + final Engine.IndexCommitRef safeCommitRef; try { - phase1Snapshot = shard.acquireSafeIndexCommit(); + safeCommitRef = shard.acquireSafeIndexCommit(); + resources.add(safeCommitRef); } catch (final Exception e) { throw new RecoveryEngineException(shard.shardId(), 1, "snapshot failed", e); } @@ -177,24 +184,29 @@ public class RecoverySourceHandler { startingSeqNo = 0; try { final int estimateNumOps = shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo); - sendFileResult = phase1(phase1Snapshot.getIndexCommit(), shard.getLastKnownGlobalCheckpoint(), () -> estimateNumOps); + shard.store().incRef(); + final Releasable releaseStore = Releasables.releaseOnce(shard.store()::decRef); + resources.add(releaseStore); + sendFileStep.whenComplete(r -> IOUtils.close(safeCommitRef, releaseStore), e -> { + try { + IOUtils.close(safeCommitRef, releaseStore); + } catch (final IOException ex) { + logger.warn("releasing snapshot caused exception", ex); + } + }); + phase1(safeCommitRef.getIndexCommit(), shard.getLastKnownGlobalCheckpoint(), () -> estimateNumOps, sendFileStep); } catch (final Exception e) { - throw new RecoveryEngineException(shard.shardId(), 1, "phase1 failed", e); - } finally { - try { - IOUtils.close(phase1Snapshot); - } catch (final IOException ex) { - logger.warn("releasing snapshot caused exception", ex); - } + throw new RecoveryEngineException(shard.shardId(), 1, "sendFileStep failed", e); } } assert startingSeqNo >= 0 : "startingSeqNo must be non negative. got: " + startingSeqNo; - final StepListener prepareEngineStep = new StepListener<>(); - // For a sequence based recovery, the target can keep its local translog - prepareTargetForTranslog(isSequenceNumberBasedRecovery == false, - shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo), prepareEngineStep); - final StepListener sendSnapshotStep = new StepListener<>(); + sendFileStep.whenComplete(r -> { + // For a sequence based recovery, the target can keep its local translog + prepareTargetForTranslog(isSequenceNumberBasedRecovery == false, + shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo), prepareEngineStep); + }, onFailure); + prepareEngineStep.whenComplete(prepareEngineTime -> { /* * add shard to replication group (shard will receive replication requests from this point on) now that engine is open. @@ -231,12 +243,12 @@ public class RecoverySourceHandler { }, onFailure); - final StepListener finalizeStep = new StepListener<>(); sendSnapshotStep.whenComplete(r -> finalizeRecovery(r.targetLocalCheckpoint, finalizeStep), onFailure); finalizeStep.whenComplete(r -> { final long phase1ThrottlingWaitTime = 0L; // TODO: return the actual throttle time final SendSnapshotResult sendSnapshotResult = sendSnapshotStep.result(); + final SendFileResult sendFileResult = sendFileStep.result(); final RecoveryResponse response = new RecoveryResponse(sendFileResult.phase1FileNames, sendFileResult.phase1FileSizes, sendFileResult.phase1ExistingFileNames, sendFileResult.phase1ExistingFileSizes, sendFileResult.totalSize, sendFileResult.existingTotalSize, sendFileResult.took.millis(), phase1ThrottlingWaitTime, @@ -333,18 +345,17 @@ public class RecoverySourceHandler { * segments that are missing. 
Only segments that have the same size and * checksum can be reused */ - public SendFileResult phase1(final IndexCommit snapshot, final long globalCheckpoint, final Supplier translogOps) { + void phase1(IndexCommit snapshot, long globalCheckpoint, IntSupplier translogOps, ActionListener listener) { cancellableThreads.checkForCancel(); // Total size of segment files that are recovered - long totalSize = 0; + long totalSizeInBytes = 0; // Total size of segment files that were able to be re-used - long existingTotalSize = 0; + long existingTotalSizeInBytes = 0; final List phase1FileNames = new ArrayList<>(); final List phase1FileSizes = new ArrayList<>(); final List phase1ExistingFileNames = new ArrayList<>(); final List phase1ExistingFileSizes = new ArrayList<>(); final Store store = shard.store(); - store.incRef(); try { StopWatch stopWatch = new StopWatch().start(); final Store.MetadataSnapshot recoverySourceMetadata; @@ -370,12 +381,12 @@ public class RecoverySourceHandler { for (StoreFileMetaData md : diff.identical) { phase1ExistingFileNames.add(md.name()); phase1ExistingFileSizes.add(md.length()); - existingTotalSize += md.length(); + existingTotalSizeInBytes += md.length(); if (logger.isTraceEnabled()) { logger.trace("recovery [phase1]: not recovering [{}], exist in local store and has checksum [{}]," + " size [{}]", md.name(), md.checksum(), md.length()); } - totalSize += md.length(); + totalSizeInBytes += md.length(); } List phase1Files = new ArrayList<>(diff.different.size() + diff.missing.size()); phase1Files.addAll(diff.different); @@ -389,75 +400,33 @@ public class RecoverySourceHandler { } phase1FileNames.add(md.name()); phase1FileSizes.add(md.length()); - totalSize += md.length(); + totalSizeInBytes += md.length(); } logger.trace("recovery [phase1]: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]", - phase1FileNames.size(), new ByteSizeValue(totalSize), - phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize)); + phase1FileNames.size(), new ByteSizeValue(totalSizeInBytes), + phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSizeInBytes)); cancellableThreads.execute(() -> recoveryTarget.receiveFileInfo( - phase1FileNames, phase1FileSizes, phase1ExistingFileNames, phase1ExistingFileSizes, translogOps.get())); + phase1FileNames, phase1FileSizes, phase1ExistingFileNames, phase1ExistingFileSizes, translogOps.getAsInt())); sendFiles(store, phase1Files.toArray(new StoreFileMetaData[0]), translogOps); - // Send the CLEAN_FILES request, which takes all of the files that - // were transferred and renames them from their temporary file - // names to the actual file names. It also writes checksums for - // the files after they have been renamed. 
- // - // Once the files have been renamed, any other files that are not - // related to this recovery (out of date segments, for example) - // are deleted - try { - cancellableThreads.executeIO(() -> - recoveryTarget.cleanFiles(translogOps.get(), globalCheckpoint, recoverySourceMetadata)); - } catch (RemoteTransportException | IOException targetException) { - final IOException corruptIndexException; - // we realized that after the index was copied and we wanted to finalize the recovery - // the index was corrupted: - // - maybe due to a broken segments file on an empty index (transferred with no checksum) - // - maybe due to old segments without checksums or length only checks - if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(targetException)) != null) { - try { - final Store.MetadataSnapshot recoverySourceMetadata1 = store.getMetadata(snapshot); - StoreFileMetaData[] metadata = - StreamSupport.stream(recoverySourceMetadata1.spliterator(), false).toArray(StoreFileMetaData[]::new); - ArrayUtil.timSort(metadata, Comparator.comparingLong(StoreFileMetaData::length)); // check small files first - for (StoreFileMetaData md : metadata) { - cancellableThreads.checkForCancel(); - logger.debug("checking integrity for file {} after remove corruption exception", md); - if (store.checkIntegrityNoException(md) == false) { // we are corrupted on the primary -- fail! - shard.failShard("recovery", corruptIndexException); - logger.warn("Corrupted file detected {} checksum mismatch", md); - throw corruptIndexException; - } - } - } catch (IOException ex) { - targetException.addSuppressed(ex); - throw targetException; - } - // corruption has happened on the way to replica - RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " + - "checksums are ok", null); - exception.addSuppressed(targetException); - logger.warn(() -> new ParameterizedMessage( - "{} Remote file corruption during finalization of recovery on node {}. 
local checksum OK", - shard.shardId(), request.targetNode()), corruptIndexException); - throw exception; - } else { - throw targetException; - } - } + final long totalSize = totalSizeInBytes; + final long existingTotalSize = existingTotalSizeInBytes; + cleanFiles(store, recoverySourceMetadata, translogOps, globalCheckpoint, ActionListener.map(listener, aVoid -> { + final TimeValue took = stopWatch.totalTime(); + logger.trace("recovery [phase1]: took [{}]", took); + return new SendFileResult(phase1FileNames, phase1FileSizes, totalSize, phase1ExistingFileNames, + phase1ExistingFileSizes, existingTotalSize, took); + })); } else { logger.trace("skipping [phase1]- identical sync id [{}] found on both source and target", recoverySourceMetadata.getSyncId()); + final TimeValue took = stopWatch.totalTime(); + logger.trace("recovery [phase1]: took [{}]", took); + listener.onResponse(new SendFileResult(phase1FileNames, phase1FileSizes, totalSizeInBytes, phase1ExistingFileNames, + phase1ExistingFileSizes, existingTotalSizeInBytes, took)); } - final TimeValue took = stopWatch.totalTime(); - logger.trace("recovery [phase1]: took [{}]", took); - return new SendFileResult(phase1FileNames, phase1FileSizes, totalSize, phase1ExistingFileNames, - phase1ExistingFileSizes, existingTotalSize, took); } catch (Exception e) { - throw new RecoverFilesRecoveryException(request.shardId(), phase1FileNames.size(), new ByteSizeValue(totalSize), e); - } finally { - store.decRef(); + throw new RecoverFilesRecoveryException(request.shardId(), phase1FileNames.size(), new ByteSizeValue(totalSizeInBytes), e); } } @@ -695,7 +664,7 @@ public class RecoverySourceHandler { '}'; } - void sendFiles(Store store, StoreFileMetaData[] files, Supplier translogOps) throws Exception { + void sendFiles(Store store, StoreFileMetaData[] files, IntSupplier translogOps) throws Exception { ArrayUtil.timSort(files, Comparator.comparingLong(StoreFileMetaData::length)); // send smallest first final LocalCheckpointTracker requestSeqIdTracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); final AtomicReference> error = new AtomicReference<>(); @@ -720,7 +689,7 @@ public class RecoverySourceHandler { } final long requestFilePosition = position; cancellableThreads.executeIO(() -> - recoveryTarget.writeFileChunk(md, requestFilePosition, content, lastChunk, translogOps.get(), + recoveryTarget.writeFileChunk(md, requestFilePosition, content, lastChunk, translogOps.getAsInt(), ActionListener.wrap( r -> requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId), e -> { @@ -741,24 +710,53 @@ public class RecoverySourceHandler { cancellableThreads.execute(() -> requestSeqIdTracker.waitForProcessedOpsToComplete(requestSeqIdTracker.getMaxSeqNo())); } if (error.get() != null) { - handleErrorOnSendFiles(store, error.get().v1(), error.get().v2()); + handleErrorOnSendFiles(store, error.get().v2(), new StoreFileMetaData[]{error.get().v1()}); } } - private void handleErrorOnSendFiles(Store store, StoreFileMetaData md, Exception e) throws Exception { - final IOException corruptIndexException; - if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(e)) != null) { - if (store.checkIntegrityNoException(md) == false) { // we are corrupted on the primary -- fail! 
- logger.warn("{} Corrupted file detected {} checksum mismatch", shardId, md); - failEngine(corruptIndexException); - throw corruptIndexException; + private void cleanFiles(Store store, Store.MetadataSnapshot sourceMetadata, IntSupplier translogOps, + long globalCheckpoint, ActionListener listener) { + // Send the CLEAN_FILES request, which takes all of the files that + // were transferred and renames them from their temporary file + // names to the actual file names. It also writes checksums for + // the files after they have been renamed. + // + // Once the files have been renamed, any other files that are not + // related to this recovery (out of date segments, for example) + // are deleted + cancellableThreads.execute(() -> recoveryTarget.cleanFiles(translogOps.getAsInt(), globalCheckpoint, sourceMetadata, + ActionListener.delegateResponse(listener, (l, e) -> ActionListener.completeWith(l, () -> { + StoreFileMetaData[] mds = StreamSupport.stream(sourceMetadata.spliterator(), false).toArray(StoreFileMetaData[]::new); + ArrayUtil.timSort(mds, Comparator.comparingLong(StoreFileMetaData::length)); // check small files first + handleErrorOnSendFiles(store, e, mds); + throw e; + })))); + } + + private void handleErrorOnSendFiles(Store store, Exception e, StoreFileMetaData[] mds) throws Exception { + final IOException corruptIndexException = ExceptionsHelper.unwrapCorruption(e); + if (corruptIndexException != null) { + Exception localException = null; + for (StoreFileMetaData md : mds) { + cancellableThreads.checkForCancel(); + logger.debug("checking integrity for file {} after remove corruption exception", md); + if (store.checkIntegrityNoException(md) == false) { // we are corrupted on the primary -- fail! + logger.warn("{} Corrupted file detected {} checksum mismatch", shardId, md); + if (localException == null) { + localException = corruptIndexException; + } + failEngine(corruptIndexException); + } + } + if (localException != null) { + throw localException; } else { // corruption has happened on the way to replica - RemoteTransportException exception = new RemoteTransportException( + RemoteTransportException remoteException = new RemoteTransportException( "File corruption occurred on recovery but checksums are ok", null); - exception.addSuppressed(e); + remoteException.addSuppressed(e); logger.warn(() -> new ParameterizedMessage("{} Remote file corruption on node {}, recovering {}. 
local checksum OK", - shardId, request.targetNode(), md), corruptIndexException); - throw exception; + shardId, request.targetNode(), mds), corruptIndexException); + throw remoteException; } } else { throw e; diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index bbd0cea04af..55aa5b22595 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -392,57 +392,61 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget } @Override - public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData) throws IOException { - state().getTranslog().totalOperations(totalTranslogOps); - // first, we go and move files that were created with the recovery id suffix to - // the actual names, its ok if we have a corrupted index here, since we have replicas - // to recover from in case of a full cluster shutdown just when this code executes... - multiFileWriter.renameAllTempFiles(); - final Store store = store(); - store.incRef(); - try { - store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData); - if (indexShard.indexSettings().getIndexVersionCreated().before(Version.V_6_0_0_rc1)) { - store.ensureIndexHasHistoryUUID(); - } - final String translogUUID = Translog.createEmptyTranslog( - indexShard.shardPath().resolveTranslog(), globalCheckpoint, shardId, indexShard.getPendingPrimaryTerm()); - store.associateIndexWithNewTranslog(translogUUID); - - if (indexShard.getRetentionLeases().leases().isEmpty()) { - // if empty, may be a fresh IndexShard, so write an empty leases file to disk - indexShard.persistRetentionLeases(); - assert indexShard.loadRetentionLeases().leases().isEmpty(); - } else { - assert indexShard.assertRetentionLeasesPersisted(); - } - - } catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) { - // this is a fatal exception at this stage. - // this means we transferred files from the remote that have not be checksummed and they are - // broken. We have to clean up this shard entirely, remove all files and bubble it up to the - // source shard since this index might be broken there as well? The Source can handle this and checks - // its content on disk if possible. + public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData, + ActionListener listener) { + ActionListener.completeWith(listener, () -> { + state().getTranslog().totalOperations(totalTranslogOps); + // first, we go and move files that were created with the recovery id suffix to + // the actual names, its ok if we have a corrupted index here, since we have replicas + // to recover from in case of a full cluster shutdown just when this code executes... 
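A conceptual sketch of that rename step, in plain java.nio (imports from java.nio.file assumed): the ".recovery" temp suffix is invented for the sketch, and the real work happens in MultiFileWriter#renameAllTempFiles, which also verifies checksums:

    // files arrive under a temporary name carrying a recovery suffix
    try (DirectoryStream<Path> temps = Files.newDirectoryStream(shardDir, "*.recovery")) {
        for (Path temp : temps) {
            String tempName = temp.getFileName().toString();
            String actualName = tempName.substring(0, tempName.length() - ".recovery".length());
            // atomic move: a crash cannot leave the file visible under both names
            Files.move(temp, temp.resolveSibling(actualName), StandardCopyOption.ATOMIC_MOVE);
        }
    }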
+ multiFileWriter.renameAllTempFiles(); + final Store store = store(); + store.incRef(); try { - try { - store.removeCorruptionMarker(); - } finally { - Lucene.cleanLuceneIndex(store.directory()); // clean up and delete all files + store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData); + if (indexShard.indexSettings().getIndexVersionCreated().before(Version.V_6_0_0_rc1)) { + store.ensureIndexHasHistoryUUID(); } - } catch (Exception e) { - logger.debug("Failed to clean lucene index", e); - ex.addSuppressed(e); + final String translogUUID = Translog.createEmptyTranslog( + indexShard.shardPath().resolveTranslog(), globalCheckpoint, shardId, indexShard.getPendingPrimaryTerm()); + store.associateIndexWithNewTranslog(translogUUID); + + if (indexShard.getRetentionLeases().leases().isEmpty()) { + // if empty, may be a fresh IndexShard, so write an empty leases file to disk + indexShard.persistRetentionLeases(); + assert indexShard.loadRetentionLeases().leases().isEmpty(); + } else { + assert indexShard.assertRetentionLeasesPersisted(); + } + + } catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) { + // this is a fatal exception at this stage. + // this means we transferred files from the remote that have not be checksummed and they are + // broken. We have to clean up this shard entirely, remove all files and bubble it up to the + // source shard since this index might be broken there as well? The Source can handle this and checks + // its content on disk if possible. + try { + try { + store.removeCorruptionMarker(); + } finally { + Lucene.cleanLuceneIndex(store.directory()); // clean up and delete all files + } + } catch (Exception e) { + logger.debug("Failed to clean lucene index", e); + ex.addSuppressed(e); + } + RecoveryFailedException rfe = new RecoveryFailedException(state(), "failed to clean after recovery", ex); + fail(rfe, true); + throw rfe; + } catch (Exception ex) { + RecoveryFailedException rfe = new RecoveryFailedException(state(), "failed to clean after recovery", ex); + fail(rfe, true); + throw rfe; + } finally { + store.decRef(); } - RecoveryFailedException rfe = new RecoveryFailedException(state(), "failed to clean after recovery", ex); - fail(rfe, true); - throw rfe; - } catch (Exception ex) { - RecoveryFailedException rfe = new RecoveryFailedException(state(), "failed to clean after recovery", ex); - fail(rfe, true); - throw rfe; - } finally { - store.decRef(); - } + return null; + }); } @Override diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java index d03fe42d901..89f4cb22c2b 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java @@ -26,7 +26,6 @@ import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.index.translog.Translog; -import java.io.IOException; import java.util.List; public interface RecoveryTargetHandler { @@ -99,7 +98,7 @@ public interface RecoveryTargetHandler { * @param globalCheckpoint the global checkpoint on the primary * @param sourceMetaData meta data of the source store */ - void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData) throws IOException; + void cleanFiles(int totalTranslogOps, long globalCheckpoint, 
Store.MetadataSnapshot sourceMetaData, ActionListener listener); /** writes a partial file chunk to the target store */ void writeFileChunk(StoreFileMetaData fileMetaData, long position, BytesReference content, diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java index ec3c22d42a1..6b786fdae4d 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -140,11 +140,13 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { } @Override - public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData) throws IOException { + public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData, + ActionListener listener) { transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.CLEAN_FILES, new RecoveryCleanFilesRequest(recoveryId, shardId, sourceMetaData, totalTranslogOps, globalCheckpoint), TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), - EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); + new ActionListenerResponseHandler<>(ActionListener.map(listener, r -> null), + in -> TransportResponse.Empty.INSTANCE, ThreadPool.Names.GENERIC)); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index e25557eaabc..97e5210c9d0 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -122,14 +122,15 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase (indexShard, node) -> new RecoveryTarget(indexShard, node, recoveryListener) { @Override public void cleanFiles(int totalTranslogOps, long globalCheckpoint, - Store.MetadataSnapshot sourceMetaData) throws IOException { - super.cleanFiles(totalTranslogOps, globalCheckpoint, sourceMetaData); - latch.countDown(); - try { - latch.await(); - } catch (InterruptedException e) { - throw new AssertionError(e); - } + Store.MetadataSnapshot sourceMetaData, ActionListener listener) { + super.cleanFiles(totalTranslogOps, globalCheckpoint, sourceMetaData, ActionListener.runAfter(listener, () -> { + latch.countDown(); + try { + latch.await(); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + })); } }); future.get(); diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index c94c289f51f..c60f32132c6 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -848,9 +848,10 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC } @Override - public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData) throws IOException { + public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot 
sourceMetaData, + ActionListener listener) { blockIfNeeded(RecoveryState.Stage.INDEX); - super.cleanFiles(totalTranslogOps, globalCheckpoint, sourceMetaData); + super.cleanFiles(totalTranslogOps, globalCheckpoint, sourceMetaData, listener); } @Override diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java index bb4c25e6186..5a6d7fbaa17 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -28,6 +28,7 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.UUIDs; @@ -189,7 +190,10 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase { for (Thread sender : senders) { sender.join(); } - recoveryTarget.cleanFiles(0, Long.parseLong(sourceSnapshot.getCommitUserData().get(SequenceNumbers.MAX_SEQ_NO)), sourceSnapshot); + PlainActionFuture cleanFilesFuture = new PlainActionFuture<>(); + recoveryTarget.cleanFiles(0, Long.parseLong(sourceSnapshot.getCommitUserData().get(SequenceNumbers.MAX_SEQ_NO)), + sourceSnapshot, cleanFilesFuture); + cleanFilesFuture.actionGet(); recoveryTarget.decRef(); Store.MetadataSnapshot targetSnapshot = targetShard.snapshotStoreMetadata(); Store.RecoveryDiff diff = sourceSnapshot.recoveryDiff(targetSnapshot); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index b00e89575cc..b69033ba9b4 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -98,7 +98,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.function.IntSupplier; -import java.util.function.Supplier; import java.util.zip.CRC32; import static java.util.Collections.emptyMap; @@ -478,9 +477,9 @@ public class RecoverySourceHandlerTests extends ESTestCase { between(1, 8)) { @Override - public SendFileResult phase1(final IndexCommit snapshot, final long globalCheckpoint, final Supplier translogOps) { + void phase1(IndexCommit snapshot, long globalCheckpoint, IntSupplier translogOps, ActionListener listener) { phase1Called.set(true); - return super.phase1(snapshot, globalCheckpoint, translogOps); + super.phase1(snapshot, globalCheckpoint, translogOps, listener); } @Override @@ -758,7 +757,8 @@ public class RecoverySourceHandlerTests extends ESTestCase { } @Override - public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData) { + public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData, + ActionListener listener) { } @Override diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java 
index c3f6a3aae89..28e84c1210a 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -47,7 +47,6 @@ import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.SnapshotMatchers; import org.elasticsearch.index.translog.Translog; -import java.io.IOException; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -335,9 +334,10 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase { assertThat(replicaShard.getLastKnownGlobalCheckpoint(), equalTo(primaryShard.getLastKnownGlobalCheckpoint())); } @Override - public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData) throws IOException { + public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData, + ActionListener listener) { assertThat(globalCheckpoint, equalTo(primaryShard.getLastKnownGlobalCheckpoint())); - super.cleanFiles(totalTranslogOps, globalCheckpoint, sourceMetaData); + super.cleanFiles(totalTranslogOps, globalCheckpoint, sourceMetaData, listener); } }, true, true); List commits = DirectoryReader.listCommits(replicaShard.store().directory()); diff --git a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AsyncRecoveryTarget.java b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AsyncRecoveryTarget.java index d5a7ab8109e..cf2b768f46d 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AsyncRecoveryTarget.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AsyncRecoveryTarget.java @@ -29,7 +29,6 @@ import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.index.translog.Translog; -import java.io.IOException; import java.util.List; import java.util.concurrent.Executor; @@ -75,8 +74,9 @@ public class AsyncRecoveryTarget implements RecoveryTargetHandler { } @Override - public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData) throws IOException { - target.cleanFiles(totalTranslogOps, globalCheckpoint, sourceMetaData); + public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData, + ActionListener listener) { + executor.execute(() -> target.cleanFiles(totalTranslogOps, globalCheckpoint, sourceMetaData, listener)); } @Override From fca7a1971302b5541272f3a8f350dac3c079b779 Mon Sep 17 00:00:00 2001 From: David Turner Date: Sun, 30 Jun 2019 16:44:57 +0100 Subject: [PATCH 41/42] Avoid parallel reroutes in DiskThresholdMonitor (#43381) Today the `DiskThresholdMonitor` limits the frequency with which it submits reroute tasks, but it might still submit these tasks faster than the master can process them if, for instance, each reroute takes over 60 seconds. This causes a problem since the reroute task runs with priority `IMMEDIATE` and is always scheduled when there is a node over the high watermark, so this can starve any other pending tasks on the master. This change avoids further updates from the monitor while its last task(s) are still in progress, and it measures the time of each update from the completion time of the reroute task rather than its start time, to allow a larger window for other tasks to run. It also now makes use of the `RoutingService` to submit the reroute task, in order to batch this task with any other pending reroutes. 
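As a rough illustration of that batching (a minimal standalone sketch, not the patch's code; `RerouteBatcher`, `reroute` and `onTaskFinished` are invented names here, and the real logic lives in `RoutingService#reroute`):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Consumer;

    class RerouteBatcher {
        private final Object mutex = new Object();
        private List<Consumer<Exception>> pendingListeners; // null when no task is in flight

        // Callers ask for a reroute; only the first caller of a batch submits a task.
        // submitTask must enqueue exactly one task that later calls onTaskFinished.
        void reroute(Consumer<Exception> listener, Runnable submitTask) {
            synchronized (mutex) {
                if (pendingListeners != null) {
                    pendingListeners.add(listener); // join the in-flight batch
                    return;
                }
                pendingListeners = new ArrayList<>();
                pendingListeners.add(listener);
            }
            submitTask.run();
        }

        // Called once when the submitted task completes; failure is null on success.
        void onTaskFinished(Exception failure) {
            final List<Consumer<Exception>> listeners;
            synchronized (mutex) {
                listeners = pendingListeners;
                pendingListeners = null; // later callers start a fresh batch
            }
            if (listeners == null) {
                return; // no batch was pending
            }
            for (Consumer<Exception> l : listeners) {
                l.accept(failure);
            }
        }
    }

Every caller that arrives while a task is pending joins the current batch and is notified when that single task completes, so at most one reroute task is ever queued at a time.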
It enhances the `RoutingService` to notify its listeners on completion. Fixes #40174 Relates #42559 --- .../cluster/InternalClusterInfoService.java | 10 +- .../action/shard/ShardStateAction.java | 4 +- .../cluster/coordination/Coordinator.java | 6 +- .../cluster/coordination/JoinHelper.java | 9 +- .../coordination/JoinTaskExecutor.java | 12 +- .../cluster/routing/RoutingService.java | 81 ++++++-- .../allocation/DiskThresholdMonitor.java | 189 +++++++++++------- .../discovery/zen/NodeJoinController.java | 5 +- .../discovery/zen/ZenDiscovery.java | 2 +- .../gateway/GatewayAllocator.java | 6 +- .../java/org/elasticsearch/node/Node.java | 9 +- .../elasticsearch/cluster/DiskUsageTests.java | 2 +- .../cluster/coordination/JoinHelperTests.java | 4 +- .../cluster/coordination/NodeJoinTests.java | 2 +- .../cluster/routing/RoutingServiceTests.java | 170 ++++++++++++++++ .../allocation/DiskThresholdMonitorTests.java | 123 ++++++++++-- .../allocation/decider/MockDiskUsagesIT.java | 9 - .../zen/NodeJoinControllerTests.java | 2 +- .../discovery/zen/ZenDiscoveryUnitTests.java | 2 +- .../indices/cluster/ClusterStateChanges.java | 2 +- .../snapshots/SnapshotResiliencyTests.java | 4 +- .../AbstractCoordinatorTestCase.java | 2 +- 22 files changed, 503 insertions(+), 152 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java diff --git a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index 8d78f9c838e..4b893619891 100644 --- a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -131,13 +131,13 @@ public class InternalClusterInfoService implements ClusterInfoService, LocalNode logger.trace("I have been elected master, scheduling a ClusterInfoUpdateJob"); } - // Submit a job that will start after DEFAULT_STARTING_INTERVAL, and reschedule itself after running + // Submit a job that will reschedule itself after running threadPool.scheduleUnlessShuttingDown(updateFrequency, executorName(), new SubmitReschedulingClusterInfoUpdatedJob()); try { if (clusterService.state().getNodes().getDataNodes().size() > 1) { // Submit an info update job to be run immediately - threadPool.executor(executorName()).execute(() -> maybeRefresh()); + threadPool.executor(executorName()).execute(this::maybeRefresh); } } catch (EsRejectedExecutionException ex) { logger.debug("Couldn't schedule cluster info update task - node might be shutting down", ex); @@ -173,7 +173,7 @@ public class InternalClusterInfoService implements ClusterInfoService, LocalNode if (logger.isDebugEnabled()) { logger.debug("data node was added, retrieving new cluster info"); } - threadPool.executor(executorName()).execute(() -> maybeRefresh()); + threadPool.executor(executorName()).execute(this::maybeRefresh); } if (this.isMaster && event.nodesRemoved()) { @@ -316,7 +316,7 @@ public class InternalClusterInfoService implements ClusterInfoService, LocalNode ShardStats[] stats = indicesStatsResponse.getShards(); ImmutableOpenMap.Builder newShardSizes = ImmutableOpenMap.builder(); ImmutableOpenMap.Builder newShardRoutingToDataPath = ImmutableOpenMap.builder(); - buildShardLevelInfo(logger, stats, newShardSizes, newShardRoutingToDataPath, clusterService.state()); + buildShardLevelInfo(logger, stats, newShardSizes, newShardRoutingToDataPath); shardSizes = newShardSizes.build(); 
shardRoutingToDataPath = newShardRoutingToDataPath.build(); } @@ -365,7 +365,7 @@ public class InternalClusterInfoService implements ClusterInfoService, LocalNode } static void buildShardLevelInfo(Logger logger, ShardStats[] stats, ImmutableOpenMap.Builder<String, Long> newShardSizes, - ImmutableOpenMap.Builder<ShardRouting, String> newShardRoutingToDataPath, ClusterState state) { + ImmutableOpenMap.Builder<ShardRouting, String> newShardRoutingToDataPath) { for (ShardStats s : stats) { newShardRoutingToDataPath.put(s.getShardRouting(), s.getDataPath()); long size = s.getStats().getStore().sizeInBytes();
diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index b07ba8d09f3..32cc265a6c7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -382,7 +382,9 @@ public class ShardStateAction { if (logger.isTraceEnabled()) { logger.trace("{}, scheduling a reroute", reason); } - routingService.reroute(reason); + routingService.reroute(reason, ActionListener.wrap( + r -> logger.trace("{}, reroute completed", reason), + e -> logger.debug(new ParameterizedMessage("{}, reroute failed", reason), e))); } } }
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index e4619d33a7e..b9f3fdbd8c7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -83,7 +83,6 @@ import java.util.Random; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; -import java.util.function.Consumer; import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -152,13 +151,14 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery * @param nodeName The name of the node, used to name the {@link java.util.concurrent.ExecutorService} of the {@link SeedHostsResolver}. * @param onJoinValidators A collection of join validators to restrict which nodes may join the cluster. * @param reroute A callback to call when the membership of the cluster has changed, to recalculate the assignment of shards. In - * production code this calls {@link org.elasticsearch.cluster.routing.RoutingService#reroute(String)}. + * production code this calls + * {@link org.elasticsearch.cluster.routing.RoutingService#reroute(String, ActionListener)}. 
*/ public Coordinator(String nodeName, Settings settings, ClusterSettings clusterSettings, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, AllocationService allocationService, MasterService masterService, Supplier<CoordinationState.PersistedState> persistedStateSupplier, SeedHostsProvider seedHostsProvider, ClusterApplier clusterApplier, Collection<BiConsumer<DiscoveryNode, ClusterState>> onJoinValidators, Random random, - Consumer<String> reroute, ElectionStrategy electionStrategy) { + BiConsumer<String, ActionListener<Void>> reroute, ElectionStrategy electionStrategy) { this.settings = settings; this.transportService = transportService; this.masterService = masterService;
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java index bdf2afe9213..7d4a1f41cd2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java @@ -64,7 +64,6 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; -import java.util.function.Consumer; import java.util.function.Function; import java.util.function.LongSupplier; import java.util.function.Supplier; @@ -91,10 +90,10 @@ public class JoinHelper { private AtomicReference<FailedJoinAttempt> lastFailedJoinAttempt = new AtomicReference<>(); - public JoinHelper(Settings settings, AllocationService allocationService, MasterService masterService, - TransportService transportService, LongSupplier currentTermSupplier, Supplier<ClusterState> currentStateSupplier, - BiConsumer<JoinRequest, JoinCallback> joinHandler, Function<StartJoinRequest, Join> joinLeaderInTerm, - Collection<BiConsumer<DiscoveryNode, ClusterState>> joinValidators, Consumer<String> reroute) { + JoinHelper(Settings settings, AllocationService allocationService, MasterService masterService, + TransportService transportService, LongSupplier currentTermSupplier, Supplier<ClusterState> currentStateSupplier, + BiConsumer<JoinRequest, JoinCallback> joinHandler, Function<StartJoinRequest, Join> joinLeaderInTerm, + Collection<BiConsumer<DiscoveryNode, ClusterState>> joinValidators, BiConsumer<String, ActionListener<Void>> reroute) { this.masterService = masterService; this.transportService = transportService; this.joinTimeout = JOIN_TIMEOUT_SETTING.get(settings);
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java index f82ff1a1155..2f129fb1936 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.coordination; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.NotMasterException; @@ -38,7 +39,6 @@ import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.function.BiConsumer; -import java.util.function.Consumer; import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; @@ -47,7 +47,7 @@ public class JoinTaskExecutor implements ClusterStateTaskExecutor<JoinTaskExecutor.Task> { - private final Consumer<String> reroute; + private final BiConsumer<String, ActionListener<Void>> reroute; private final int minimumMasterNodesOnLocalNode; @@ -86,7 +86,8 @@ public class JoinTaskExecutor implements ClusterStateTaskExecutor<JoinTaskExecutor.Task> { - public JoinTaskExecutor(Settings settings, AllocationService allocationService, Logger logger, Consumer<String> reroute) { + public JoinTaskExecutor(Settings settings, AllocationService allocationService, Logger logger, + BiConsumer<String, ActionListener<Void>> reroute) { this.allocationService = allocationService; this.logger = logger; minimumMasterNodesOnLocalNode = ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(settings); @@ -154,7 +155,10 @@ public class JoinTaskExecutor implements ClusterStateTaskExecutor<JoinTaskExecutor.Task> { + reroute.accept("post-join reroute", ActionListener.wrap( + r -> logger.trace("post-join reroute completed"), + e -> logger.debug("post-join reroute failed", e))); + return results.build(allocationService.adaptAutoExpandReplicas(newState.nodes(nodesBuilder).build())); } else { // we must return a new cluster state instance to force publishing. This is important
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java index 89e19e02b30..7068f907905 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java @@ -22,16 +22,20 @@ package org.elasticsearch.cluster.routing; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainListenableActionFuture; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; -import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiFunction; /** * A {@link RoutingService} listens to clusters state. When this service @@ -51,14 +55,16 @@ public class RoutingService extends AbstractLifecycleComponent { private static final String CLUSTER_UPDATE_TASK_SOURCE = "cluster_reroute"; private final ClusterService clusterService; - private final AllocationService allocationService; + private final BiFunction<ClusterState, String, ClusterState> reroute; - private AtomicBoolean rerouting = new AtomicBoolean(); + private final Object mutex = new Object(); + @Nullable // null if no reroute is currently pending + private PlainListenableActionFuture<Void> pendingRerouteListeners; @Inject - public RoutingService(ClusterService clusterService, AllocationService allocationService) { + public RoutingService(ClusterService clusterService, BiFunction<ClusterState, String, ClusterState> reroute) { this.clusterService = clusterService; - this.allocationService = allocationService; + this.reroute = reroute; } @Override @@ -76,34 +82,55 @@ public class RoutingService extends AbstractLifecycleComponent { /** * Initiates a reroute. */ - public final void reroute(String reason) { + public final void reroute(String reason, ActionListener<Void> listener) { + if (lifecycle.started() == false) { + listener.onFailure(new IllegalStateException( + "rejecting delayed reroute [" + reason + "] in state [" + lifecycleState() + "]")); + return; + } + final PlainListenableActionFuture<Void> currentListeners; + synchronized (mutex) { + if (pendingRerouteListeners != null) { + logger.trace("already has pending reroute, adding [{}] to batch", reason); + pendingRerouteListeners.addListener(listener); + return; + } + currentListeners = PlainListenableActionFuture.newListenableFuture(); + currentListeners.addListener(listener); + pendingRerouteListeners = currentListeners; + } + logger.trace("rerouting [{}]", reason); try { - if (lifecycle.stopped()) { - return; - } - if (rerouting.compareAndSet(false, true) == false) { - logger.trace("already has pending reroute, ignoring {}", reason); - return; - } - logger.trace("rerouting {}", reason); clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE + "(" + reason + ")", new ClusterStateUpdateTask(Priority.HIGH) { @Override public ClusterState execute(ClusterState currentState) { - rerouting.set(false); - return allocationService.reroute(currentState, reason); + synchronized (mutex) { + assert pendingRerouteListeners == currentListeners; + pendingRerouteListeners = null; + } + return reroute.apply(currentState, reason); } @Override public void onNoLongerMaster(String source) { - rerouting.set(false); - // no biggie + synchronized (mutex) { + if (pendingRerouteListeners == currentListeners) { + pendingRerouteListeners = null; + } + } + currentListeners.onFailure(new NotMasterException("delayed reroute [" + reason + "] cancelled")); + // no big deal, the new master will reroute again } @Override public void onFailure(String source, Exception e) { - rerouting.set(false); - ClusterState state = clusterService.state(); + synchronized (mutex) { + if (pendingRerouteListeners == currentListeners) { + pendingRerouteListeners = null; + } + } + final ClusterState state = clusterService.state(); if (logger.isTraceEnabled()) { logger.error(() -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state), e); @@ -111,12 +138,22 @@ public class RoutingService extends AbstractLifecycleComponent { logger.error(() -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e); } + currentListeners.onFailure(new ElasticsearchException("delayed reroute [" + reason + "] failed", e)); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + currentListeners.onResponse(null); } }); } catch (Exception e) { - rerouting.set(false); + synchronized (mutex) { + assert pendingRerouteListeners == currentListeners; + pendingRerouteListeners = null; + } ClusterState state = clusterService.state(); logger.warn(() -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state), e); + currentListeners.onFailure(new ElasticsearchException("delayed reroute [" + reason + "] could not be submitted", e)); } } }
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java index 4badab5a0ca..96e4974b9b4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java @@ -21,12 +21,20 @@ package org.elasticsearch.cluster.routing.allocation; import java.util.HashSet; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.LongSupplier; import java.util.function.Supplier; import com.carrotsearch.hppc.ObjectLookupContainer; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterState; @@ -54,11 +62,15 @@ public class DiskThresholdMonitor { private final Client client; private final Set<String> nodeHasPassedWatermark = Sets.newConcurrentHashSet(); private final Supplier<ClusterState> clusterStateSupplier; - private long lastRunNS; + private final LongSupplier currentTimeMillisSupplier; + private final AtomicLong lastRunTimeMillis = new AtomicLong(Long.MIN_VALUE); + private final AtomicBoolean checkInProgress = new AtomicBoolean(); + private final SetOnce<Consumer<ActionListener<Void>>> rerouteAction = new SetOnce<>(); public DiskThresholdMonitor(Settings settings, Supplier<ClusterState> clusterStateSupplier, ClusterSettings clusterSettings, - Client client) { + Client client, LongSupplier currentTimeMillisSupplier) { this.clusterStateSupplier = clusterStateSupplier; + this.currentTimeMillisSupplier = currentTimeMillisSupplier; this.diskThresholdSettings = new DiskThresholdSettings(settings, clusterSettings); this.client = client; } @@ -92,88 +104,129 @@ public class DiskThresholdMonitor { } } + private void checkFinished() { + final boolean checkFinished = checkInProgress.compareAndSet(true, false); + assert checkFinished; + } public void onNewInfo(ClusterInfo info) { - ImmutableOpenMap<String, DiskUsage> usages = info.getNodeLeastAvailableDiskUsages(); - if (usages != null) { - boolean reroute = false; - String explanation = ""; - // Garbage collect nodes that have been removed from the cluster - // from the map that tracks watermark crossing - ObjectLookupContainer<String> nodes = usages.keys(); - for (String node : nodeHasPassedWatermark) { - if (nodes.contains(node) == false) { - nodeHasPassedWatermark.remove(node); - } + assert rerouteAction.get() != null; + + if (checkInProgress.compareAndSet(false, true) == false) { + logger.info("skipping monitor as a check is already in progress"); + return; + } + + final ImmutableOpenMap<String, DiskUsage> usages = info.getNodeLeastAvailableDiskUsages(); + if (usages == null) { + checkFinished(); + return; + } + + boolean reroute = false; + String explanation = ""; + final long currentTimeMillis = currentTimeMillisSupplier.getAsLong(); + + // Garbage collect nodes that have been removed from the cluster + // from the map that tracks watermark crossing + final ObjectLookupContainer<String> nodes = usages.keys(); + for (String node : nodeHasPassedWatermark) { + if (nodes.contains(node) == false) { + nodeHasPassedWatermark.remove(node); } - ClusterState state = clusterStateSupplier.get(); - Set<String> indicesToMarkReadOnly = new HashSet<>(); - for (ObjectObjectCursor<String, DiskUsage> entry : usages) { - String node = entry.key; - DiskUsage usage = entry.value; - warnAboutDiskIfNeeded(usage); - if (usage.getFreeBytes() < 
diskThresholdSettings.getFreeBytesThresholdFloodStage().getBytes() || - usage.getFreeDiskAsPercentage() < diskThresholdSettings.getFreeDiskThresholdFloodStage()) { - RoutingNode routingNode = state.getRoutingNodes().node(node); - if (routingNode != null) { // this might happen if we haven't got the full cluster-state yet?! - for (ShardRouting routing : routingNode) { - indicesToMarkReadOnly.add(routing.index().getName()); - } + } + final ClusterState state = clusterStateSupplier.get(); + final Set indicesToMarkReadOnly = new HashSet<>(); + + for (final ObjectObjectCursor entry : usages) { + final String node = entry.key; + final DiskUsage usage = entry.value; + warnAboutDiskIfNeeded(usage); + if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdFloodStage().getBytes() || + usage.getFreeDiskAsPercentage() < diskThresholdSettings.getFreeDiskThresholdFloodStage()) { + final RoutingNode routingNode = state.getRoutingNodes().node(node); + if (routingNode != null) { // this might happen if we haven't got the full cluster-state yet?! + for (ShardRouting routing : routingNode) { + indicesToMarkReadOnly.add(routing.index().getName()); } - } else if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdHigh().getBytes() || - usage.getFreeDiskAsPercentage() < diskThresholdSettings.getFreeDiskThresholdHigh()) { - if ((System.nanoTime() - lastRunNS) > diskThresholdSettings.getRerouteInterval().nanos()) { - lastRunNS = System.nanoTime(); + } + } else if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdHigh().getBytes() || + usage.getFreeDiskAsPercentage() < diskThresholdSettings.getFreeDiskThresholdHigh()) { + if (lastRunTimeMillis.get() < currentTimeMillis - diskThresholdSettings.getRerouteInterval().millis()) { + reroute = true; + explanation = "high disk watermark exceeded on one or more nodes"; + } else { + logger.debug("high disk watermark exceeded on {} but an automatic reroute has occurred " + + "in the last [{}], skipping reroute", + node, diskThresholdSettings.getRerouteInterval()); + } + nodeHasPassedWatermark.add(node); + } else if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdLow().getBytes() || + usage.getFreeDiskAsPercentage() < diskThresholdSettings.getFreeDiskThresholdLow()) { + nodeHasPassedWatermark.add(node); + } else { + if (nodeHasPassedWatermark.contains(node)) { + // The node has previously been over the high or + // low watermark, but is no longer, so we should + // reroute so any unassigned shards can be allocated + // if they are able to be + if (lastRunTimeMillis.get() < currentTimeMillis - diskThresholdSettings.getRerouteInterval().millis()) { reroute = true; - explanation = "high disk watermark exceeded on one or more nodes"; + explanation = "one or more nodes has gone under the high or low watermark"; + nodeHasPassedWatermark.remove(node); } else { - logger.debug("high disk watermark exceeded on {} but an automatic reroute has occurred " + + logger.debug("{} has gone below a disk threshold, but an automatic reroute has occurred " + "in the last [{}], skipping reroute", node, diskThresholdSettings.getRerouteInterval()); } - nodeHasPassedWatermark.add(node); - } else if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdLow().getBytes() || - usage.getFreeDiskAsPercentage() < diskThresholdSettings.getFreeDiskThresholdLow()) { - nodeHasPassedWatermark.add(node); - } else { - if (nodeHasPassedWatermark.contains(node)) { - // The node has previously been over the high or - // low watermark, but 
is no longer, so we should - // reroute so any unassigned shards can be allocated - // if they are able to be - if ((System.nanoTime() - lastRunNS) > diskThresholdSettings.getRerouteInterval().nanos()) { - lastRunNS = System.nanoTime(); - reroute = true; - explanation = "one or more nodes has gone under the high or low watermark"; - nodeHasPassedWatermark.remove(node); - } else { - logger.debug("{} has gone below a disk threshold, but an automatic reroute has occurred " + - "in the last [{}], skipping reroute", - node, diskThresholdSettings.getRerouteInterval()); - } - } } } - if (reroute) { - logger.info("rerouting shards: [{}]", explanation); - reroute(); - } - indicesToMarkReadOnly.removeIf(index -> state.getBlocks().indexBlocked(ClusterBlockLevel.WRITE, index)); - if (indicesToMarkReadOnly.isEmpty() == false) { - markIndicesReadOnly(indicesToMarkReadOnly); - } + } + + final ActionListener listener = new GroupedActionListener<>(ActionListener.wrap(this::checkFinished), 2); + + if (reroute) { + logger.info("rerouting shards: [{}]", explanation); + rerouteAction.get().accept(ActionListener.wrap(r -> { + setLastRunTimeMillis(); + listener.onResponse(r); + }, e -> { + logger.debug("reroute failed", e); + setLastRunTimeMillis(); + listener.onFailure(e); + })); + } else { + listener.onResponse(null); + } + + indicesToMarkReadOnly.removeIf(index -> state.getBlocks().indexBlocked(ClusterBlockLevel.WRITE, index)); + if (indicesToMarkReadOnly.isEmpty() == false) { + markIndicesReadOnly(indicesToMarkReadOnly, ActionListener.wrap(r -> { + setLastRunTimeMillis(); + listener.onResponse(r); + }, e -> { + logger.debug("marking indices readonly failed", e); + setLastRunTimeMillis(); + listener.onFailure(e); + })); + } else { + listener.onResponse(null); } } - protected void markIndicesReadOnly(Set indicesToMarkReadOnly) { - // set read-only block but don't block on the response - client.admin().indices().prepareUpdateSettings(indicesToMarkReadOnly.toArray(Strings.EMPTY_ARRAY)). 
- setSettings(Settings.builder().put(IndexMetaData.SETTING_READ_ONLY_ALLOW_DELETE, true).build()).execute(); + private void setLastRunTimeMillis() { + lastRunTimeMillis.getAndUpdate(l -> Math.max(l, currentTimeMillisSupplier.getAsLong())); } - protected void reroute() { - // Execute an empty reroute, but don't block on the response - client.admin().cluster().prepareReroute().execute(); + protected void markIndicesReadOnly(Set<String> indicesToMarkReadOnly, ActionListener<Void> listener) { + // set read-only block but don't block on the response + client.admin().indices().prepareUpdateSettings(indicesToMarkReadOnly.toArray(Strings.EMPTY_ARRAY)) + .setSettings(Settings.builder().put(IndexMetaData.SETTING_READ_ONLY_ALLOW_DELETE, true).build()) + .execute(ActionListener.map(listener, r -> null)); + } + + public void setRerouteAction(BiConsumer<String, ActionListener<Void>> rerouteAction) { + this.rerouteAction.set(listener -> rerouteAction.accept("disk threshold monitor", listener)); } }
diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java index bce5695a817..f382a6b7a84 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java @@ -22,6 +22,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskConfig; @@ -43,7 +44,7 @@ import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Consumer; +import java.util.function.BiConsumer; /** * This class processes incoming join request (passed zia {@link ZenDiscovery}). 
Incoming nodes @@ -62,7 +63,7 @@ public class NodeJoinController { public NodeJoinController(Settings settings, MasterService masterService, AllocationService allocationService, - ElectMasterService electMaster, Consumer reroute) { + ElectMasterService electMaster, BiConsumer> reroute) { this.masterService = masterService; joinTaskExecutor = new JoinTaskExecutor(settings, allocationService, logger, reroute) { @Override diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index c11dfb16ad5..09dcd74c6e3 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -165,7 +165,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover NamedWriteableRegistry namedWriteableRegistry, MasterService masterService, ClusterApplier clusterApplier, ClusterSettings clusterSettings, SeedHostsProvider hostsProvider, AllocationService allocationService, Collection> onJoinValidators, GatewayMetaState gatewayMetaState, - Consumer reroute) { + BiConsumer> reroute) { this.onJoinValidators = JoinTaskExecutor.addBuiltInJoinValidators(onJoinValidators); this.masterService = masterService; this.clusterApplier = clusterApplier; diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index 82627cfdc0b..6543d5d1174 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -21,6 +21,8 @@ package org.elasticsearch.gateway; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.action.support.nodes.BaseNodesResponse; import org.elasticsearch.cluster.routing.RoutingNodes; @@ -137,7 +139,9 @@ public class GatewayAllocator { @Override protected void reroute(ShardId shardId, String reason) { logger.trace("{} scheduling reroute for {}", shardId, reason); - routingService.reroute("async_shard_fetch"); + routingService.reroute("async_shard_fetch", ActionListener.wrap( + r -> logger.trace("{} scheduled reroute completed for {}", shardId, reason), + e -> logger.debug(new ParameterizedMessage("{} scheduled reroute failed for {}", shardId, reason), e))); } } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 0f9c45fd1a7..cfc98d236e9 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -368,10 +368,10 @@ public class Node implements Closeable { .newHashPublisher()); final IngestService ingestService = new IngestService(clusterService, threadPool, this.environment, scriptModule.getScriptService(), analysisModule.getAnalysisRegistry(), pluginsService.filterPlugins(IngestPlugin.class)); - final DiskThresholdMonitor listener = new DiskThresholdMonitor(settings, clusterService::state, - clusterService.getClusterSettings(), client); + final DiskThresholdMonitor diskThresholdMonitor = new DiskThresholdMonitor(settings, clusterService::state, + clusterService.getClusterSettings(), client, threadPool::relativeTimeInMillis); final 
ClusterInfoService clusterInfoService = newClusterInfoService(settings, clusterService, threadPool, client, - listener::onNewInfo); + diskThresholdMonitor::onNewInfo); final UsageService usageService = new UsageService(); ModulesBuilder modules = new ModulesBuilder(); @@ -506,7 +506,7 @@ public class Node implements Closeable { RestoreService restoreService = new RestoreService(clusterService, repositoryService, clusterModule.getAllocationService(), metaDataCreateIndexService, metaDataIndexUpgradeService, clusterService.getClusterSettings()); - final RoutingService routingService = new RoutingService(clusterService, clusterModule.getAllocationService()); + final RoutingService routingService = new RoutingService(clusterService, clusterModule.getAllocationService()::reroute); final DiscoveryModule discoveryModule = new DiscoveryModule(settings, threadPool, transportService, namedWriteableRegistry, networkService, clusterService.getMasterService(), clusterService.getClusterApplierService(), clusterService.getClusterSettings(), pluginsService.filterPlugins(DiscoveryPlugin.class), @@ -515,6 +515,7 @@ public class Node implements Closeable { transportService, indicesService, pluginsService, circuitBreakerService, scriptModule.getScriptService(), httpServerTransport, ingestService, clusterService, settingsModule.getSettingsFilter(), responseCollectorService, searchTransportService); + diskThresholdMonitor.setRerouteAction(routingService::reroute); final SearchService searchService = newSearchService(clusterService, indicesService, threadPool, scriptModule.getScriptService(), bigArrays, searchModule.getFetchPhase(), diff --git a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java index fcccaf6c0f0..55eae6fc0e9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java @@ -119,7 +119,7 @@ public class DiskUsageTests extends ESTestCase { ImmutableOpenMap.Builder shardSizes = ImmutableOpenMap.builder(); ImmutableOpenMap.Builder routingToPath = ImmutableOpenMap.builder(); ClusterState state = ClusterState.builder(new ClusterName("blarg")).version(0).build(); - InternalClusterInfoService.buildShardLevelInfo(logger, stats, shardSizes, routingToPath, state); + InternalClusterInfoService.buildShardLevelInfo(logger, stats, shardSizes, routingToPath); assertEquals(2, shardSizes.size()); assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(test_0))); assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(test_1))); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java index e0b161b76b3..4aeee62948b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java @@ -58,7 +58,7 @@ public class JoinHelperTests extends ESTestCase { x -> localNode, null, Collections.emptySet()); JoinHelper joinHelper = new JoinHelper(Settings.EMPTY, null, null, transportService, () -> 0L, () -> null, (joinRequest, joinCallback) -> { throw new AssertionError(); }, startJoinRequest -> { throw new AssertionError(); }, - Collections.emptyList(), s -> {}); + Collections.emptyList(), (s, r) -> {}); transportService.start(); DiscoveryNode node1 = new DiscoveryNode("node1", 
buildNewFakeTransportAddress(), Version.CURRENT); @@ -164,7 +164,7 @@ public class JoinHelperTests extends ESTestCase { x -> localNode, null, Collections.emptySet()); new JoinHelper(Settings.EMPTY, null, null, transportService, () -> 0L, () -> localClusterState, (joinRequest, joinCallback) -> { throw new AssertionError(); }, startJoinRequest -> { throw new AssertionError(); }, - Collections.emptyList(), s -> {}); // registers request handler + Collections.emptyList(), (s, r) -> {}); // registers request handler transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java index d407ebdc2ce..dcb6d26de9b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java @@ -174,7 +174,7 @@ public class NodeJoinTests extends ESTestCase { () -> new InMemoryPersistedState(term, initialState), r -> emptyList(), new NoOpClusterApplier(), Collections.emptyList(), - random, s -> {}, ElectionStrategy.DEFAULT_INSTANCE); + random, (s, r) -> {}, ElectionStrategy.DEFAULT_INSTANCE); transportService.start(); transportService.acceptIncomingRequests(); transport = capturingTransport; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java new file mode 100644 index 00000000000..5368c1c5544 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java @@ -0,0 +1,170 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.cluster.routing; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; + +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.startsWith; + +public class RoutingServiceTests extends ESTestCase { + + private ThreadPool threadPool; + private ClusterService clusterService; + + @Before + public void beforeTest() { + threadPool = new TestThreadPool("test"); + clusterService = ClusterServiceUtils.createClusterService(threadPool); + } + + @After + public void afterTest() { + clusterService.stop(); + threadPool.shutdown(); + } + + public void testRejectionUnlessStarted() { + final RoutingService routingService = new RoutingService(clusterService, (s, r) -> s); + final PlainActionFuture future = new PlainActionFuture<>(); + + if (randomBoolean()) { + routingService.start(); + routingService.stop(); + } else if (randomBoolean()) { + routingService.close(); + } + + routingService.reroute("test", future); + assertTrue(future.isDone()); + assertThat(expectThrows(IllegalStateException.class, future::actionGet).getMessage(), + startsWith("rejecting delayed reroute [test] in state [")); + } + + public void testReroutesWhenRequested() throws InterruptedException { + final AtomicLong rerouteCount = new AtomicLong(); + final RoutingService routingService = new RoutingService(clusterService, (s, r) -> { + rerouteCount.incrementAndGet(); + return s; + }); + + routingService.start(); + + long rerouteCountBeforeReroute = 0L; + final int iterations = between(1, 100); + final CountDownLatch countDownLatch = new CountDownLatch(iterations); + for (int i = 0; i < iterations; i++) { + rerouteCountBeforeReroute = Math.max(rerouteCountBeforeReroute, rerouteCount.get()); + routingService.reroute("iteration " + i, ActionListener.wrap(countDownLatch::countDown)); + } + countDownLatch.await(10, TimeUnit.SECONDS); + assertThat(rerouteCountBeforeReroute, lessThan(rerouteCount.get())); + } + + public void testBatchesReroutesTogether() throws BrokenBarrierException, InterruptedException { + final CyclicBarrier cyclicBarrier = new CyclicBarrier(2); + clusterService.submitStateUpdateTask("block master service", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + cyclicBarrier.await(); // notify test that we are blocked + cyclicBarrier.await(); // wait to be unblocked by test + return currentState; + } + + @Override + public void onFailure(String source, Exception e) { + throw new AssertionError(source, e); + } + }); + + cyclicBarrier.await(); // wait for master thread to be 
blocked + + final AtomicBoolean rerouteExecuted = new AtomicBoolean(); + final RoutingService routingService = new RoutingService(clusterService, (s, r) -> { + assertTrue(rerouteExecuted.compareAndSet(false, true)); // only called once + return s; + }); + + routingService.start(); + + final int iterations = between(1, 100); + final CountDownLatch countDownLatch = new CountDownLatch(iterations); + for (int i = 0; i < iterations; i++) { + routingService.reroute("iteration " + i, ActionListener.wrap(countDownLatch::countDown)); + } + + cyclicBarrier.await(); // allow master thread to continue; + countDownLatch.await(); // wait for reroute to complete + assertTrue(rerouteExecuted.get()); // see above for assertion that it's only called once + } + + public void testNotifiesOnFailure() throws InterruptedException { + + final RoutingService routingService = new RoutingService(clusterService, (s, r) -> { + if (rarely()) { + throw new ElasticsearchException("simulated"); + } + return randomBoolean() ? s : ClusterState.builder(s).build(); + }); + routingService.start(); + + final int iterations = between(1, 100); + final CountDownLatch countDownLatch = new CountDownLatch(iterations); + for (int i = 0; i < iterations; i++) { + routingService.reroute("iteration " + i, ActionListener.wrap(countDownLatch::countDown)); + if (rarely()) { + clusterService.getMasterService().setClusterStatePublisher( + randomBoolean() + ? ClusterServiceUtils.createClusterStatePublisher(clusterService.getClusterApplierService()) + : (event, publishListener, ackListener) + -> publishListener.onFailure(new FailedToCommitClusterStateException("simulated"))); + } + + if (rarely()) { + clusterService.getClusterApplierService().onNewClusterState("simulated", () -> { + ClusterState state = clusterService.state(); + return ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()) + .masterNodeId(randomBoolean() ? null : state.nodes().getLocalNodeId())).build(); + }, (source, e) -> { }); + } + } + + assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); // i.e. 
it doesn't leak any listeners
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorTests.java
index b245b0d35d6..5ba5b7a0a70 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorTests.java
@@ -19,6 +19,7 @@
 package org.elasticsearch.cluster.routing.allocation;
 
 import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.cluster.ClusterInfo;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
@@ -35,16 +36,17 @@ import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 
 import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
 
 public class DiskThresholdMonitorTests extends ESAllocationTestCase {
-
     public void testMarkFloodStageIndicesReadOnly() {
         AllocationService allocation = createAllocationService(Settings.builder()
             .put("cluster.routing.allocation.node_concurrent_recoveries", 10).build());
@@ -61,7 +63,6 @@ public class DiskThresholdMonitorTests extends ESAllocationTestCase {
             .addAsNew(metaData.index("test"))
             .addAsNew(metaData.index("test_1"))
             .addAsNew(metaData.index("test_2"))
-
             .build();
         ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
             .metaData(metaData).routingTable(routingTable).build();
@@ -74,18 +75,21 @@ public class DiskThresholdMonitorTests extends ESAllocationTestCase {
         ClusterState finalState = clusterState;
         AtomicBoolean reroute = new AtomicBoolean(false);
         AtomicReference<Set<String>> indices = new AtomicReference<>();
+        AtomicLong currentTime = new AtomicLong();
         DiskThresholdMonitor monitor = new DiskThresholdMonitor(settings, () -> finalState,
-            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null) {
+            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null, currentTime::get) {
             @Override
-            protected void reroute() {
-                assertTrue(reroute.compareAndSet(false, true));
-            }
-
-            @Override
-            protected void markIndicesReadOnly(Set<String> indicesToMarkReadOnly) {
+            protected void markIndicesReadOnly(Set<String> indicesToMarkReadOnly, ActionListener<Void> listener) {
                 assertTrue(indices.compareAndSet(null, indicesToMarkReadOnly));
+                listener.onResponse(null);
             }
         };
+
+        monitor.setRerouteAction((reason, listener) -> {
+            assertTrue(reroute.compareAndSet(false, true));
+            listener.onResponse(null);
+        });
+
         ImmutableOpenMap.Builder<String, DiskUsage> builder = ImmutableOpenMap.builder();
         builder.put("node1", new DiskUsage("node1","node1", "/foo/bar", 100, 4));
         builder.put("node2", new DiskUsage("node2","node2", "/foo/bar", 100, 30));
@@ -97,6 +101,7 @@ public class DiskThresholdMonitorTests extends ESAllocationTestCase {
         builder = ImmutableOpenMap.builder();
         builder.put("node1", new DiskUsage("node1","node1", "/foo/bar", 100, 4));
         builder.put("node2", new DiskUsage("node2","node2", "/foo/bar", 100, 5));
+        currentTime.addAndGet(randomLongBetween(60001, 120000));
         monitor.onNewInfo(new ClusterInfo(builder.build(), null, null, null));
         assertTrue(reroute.get());
         assertEquals(new HashSet<>(Arrays.asList("test_1", "test_2")), indices.get());
@@ -114,17 +119,17 @@ public class DiskThresholdMonitorTests extends ESAllocationTestCase {
         assertTrue(anotherFinalClusterState.blocks().indexBlocked(ClusterBlockLevel.WRITE, "test_2"));
 
         monitor = new DiskThresholdMonitor(settings, () -> anotherFinalClusterState,
-            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null) {
+            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null, currentTime::get) {
             @Override
-            protected void reroute() {
-                assertTrue(reroute.compareAndSet(false, true));
-            }
-
-            @Override
-            protected void markIndicesReadOnly(Set<String> indicesToMarkReadOnly) {
+            protected void markIndicesReadOnly(Set<String> indicesToMarkReadOnly, ActionListener<Void> listener) {
                 assertTrue(indices.compareAndSet(null, indicesToMarkReadOnly));
+                listener.onResponse(null);
             }
         };
+        monitor.setRerouteAction((reason, listener) -> {
+            assertTrue(reroute.compareAndSet(false, true));
+            listener.onResponse(null);
+        });
 
         indices.set(null);
         reroute.set(false);
@@ -133,6 +138,90 @@ public class DiskThresholdMonitorTests extends ESAllocationTestCase {
         builder.put("node2", new DiskUsage("node2","node2", "/foo/bar", 100, 5));
         monitor.onNewInfo(new ClusterInfo(builder.build(), null, null, null));
         assertTrue(reroute.get());
-        assertEquals(new HashSet<>(Arrays.asList("test_1")), indices.get());
+        assertEquals(Collections.singleton("test_1"), indices.get());
+    }
+
+    public void testDoesNotSubmitRerouteTaskTooFrequently() {
+        final ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
+            .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
+        AtomicLong currentTime = new AtomicLong();
+        AtomicReference<ActionListener<Void>> listenerReference = new AtomicReference<>();
+        DiskThresholdMonitor monitor = new DiskThresholdMonitor(Settings.EMPTY, () -> clusterState,
+            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null, currentTime::get) {
+            @Override
+            protected void markIndicesReadOnly(Set<String> indicesToMarkReadOnly, ActionListener<Void> listener) {
+                throw new AssertionError("unexpected");
+            }
+        };
+
+        monitor.setRerouteAction((reason, listener) -> {
+            assertNotNull(listener);
+            assertTrue(listenerReference.compareAndSet(null, listener));
+        });
+
+        final ImmutableOpenMap.Builder<String, DiskUsage> allDisksOkBuilder;
+        allDisksOkBuilder = ImmutableOpenMap.builder();
+        allDisksOkBuilder.put("node1", new DiskUsage("node1","node1", "/foo/bar", 100, 50));
+        allDisksOkBuilder.put("node2", new DiskUsage("node2","node2", "/foo/bar", 100, 50));
+        final ImmutableOpenMap<String, DiskUsage> allDisksOk = allDisksOkBuilder.build();
+
+        final ImmutableOpenMap.Builder<String, DiskUsage> oneDiskAboveWatermarkBuilder = ImmutableOpenMap.builder();
+        oneDiskAboveWatermarkBuilder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, between(5, 9)));
+        oneDiskAboveWatermarkBuilder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", 100, 50));
+        final ImmutableOpenMap<String, DiskUsage> oneDiskAboveWatermark = oneDiskAboveWatermarkBuilder.build();
+
+        // should not reroute when all disks are ok
+        currentTime.addAndGet(randomLongBetween(0, 120000));
+        monitor.onNewInfo(new ClusterInfo(allDisksOk, null, null, null));
+        assertNull(listenerReference.get());
+
+        // should reroute when one disk goes over the watermark
+        currentTime.addAndGet(randomLongBetween(0, 120000));
+        monitor.onNewInfo(new ClusterInfo(oneDiskAboveWatermark, null, null, null));
+        assertNotNull(listenerReference.get());
+        listenerReference.getAndSet(null).onResponse(null);
+
+        if (randomBoolean()) {
+            // should not re-route again within the reroute interval
+            currentTime.addAndGet(randomLongBetween(0,
+                DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.get(Settings.EMPTY).millis()));
+            monitor.onNewInfo(new ClusterInfo(allDisksOk, null, null, null));
+            assertNull(listenerReference.get());
+        }
+
+        // should reroute again when one disk is still over the watermark
+        currentTime.addAndGet(randomLongBetween(
+            DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.get(Settings.EMPTY).millis() + 1, 120000));
+        monitor.onNewInfo(new ClusterInfo(oneDiskAboveWatermark, null, null, null));
+        assertNotNull(listenerReference.get());
+        final ActionListener<Void> rerouteListener1 = listenerReference.getAndSet(null);
+
+        // should not re-route again before reroute has completed
+        currentTime.addAndGet(randomLongBetween(0, 120000));
+        monitor.onNewInfo(new ClusterInfo(allDisksOk, null, null, null));
+        assertNull(listenerReference.get());
+
+        // complete reroute
+        rerouteListener1.onResponse(null);
+
+        if (randomBoolean()) {
+            // should not re-route again within the reroute interval
+            currentTime.addAndGet(randomLongBetween(0,
+                DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.get(Settings.EMPTY).millis()));
+            monitor.onNewInfo(new ClusterInfo(allDisksOk, null, null, null));
+            assertNull(listenerReference.get());
+        }
+
+        // should reroute again after the reroute interval
+        currentTime.addAndGet(randomLongBetween(
+            DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.get(Settings.EMPTY).millis() + 1, 120000));
+        monitor.onNewInfo(new ClusterInfo(allDisksOk, null, null, null));
+        assertNotNull(listenerReference.get());
+        listenerReference.getAndSet(null).onResponse(null);
+
+        // should not reroute again when it is not required
+        currentTime.addAndGet(randomLongBetween(0, 120000));
+        monitor.onNewInfo(new ClusterInfo(allDisksOk, null, null, null));
+        assertNull(listenerReference.get());
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java
index 4580c5b59ed..595c144fa17 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.cluster.routing.allocation.decider;
 
-import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
 import org.elasticsearch.cluster.ClusterInfo;
 import org.elasticsearch.cluster.ClusterInfoService;
 import org.elasticsearch.cluster.ClusterState;
@@ -31,7 +30,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -53,7 +51,6 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
         return Collections.singletonList(MockInternalClusterInfoService.TestPlugin.class);
     }
 
-    @TestLogging("org.elasticsearch.indices.recovery:TRACE,org.elasticsearch.cluster.service:TRACE")
     public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception {
         List<String> nodes = internalCluster().startNodes(3);
@@ -105,12 +102,6 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
         assertBusy(() -> {
             final ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
-            logger.info("--> {}", clusterState.routingTable());
-
-            final RecoveryResponse recoveryResponse = client().admin().indices()
-                .prepareRecoveries("test").setActiveOnly(true).setDetailed(true).get();
-            logger.info("--> recoveries: {}", recoveryResponse);
-
             final Map<String, Integer> nodesToShardCount = new HashMap<>();
             for (final RoutingNode node : clusterState.getRoutingNodes()) {
                 logger.info("--> node {} has {} shards",
diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java
index f4e6647cc05..0d16bdc3b05 100644
--- a/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java
+++ b/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java
@@ -139,7 +139,7 @@ public class NodeJoinControllerTests extends ESTestCase {
         }
         masterService = ClusterServiceUtils.createMasterService(threadPool, initialState);
         nodeJoinController = new NodeJoinController(Settings.EMPTY, masterService, createAllocationService(Settings.EMPTY),
-            new ElectMasterService(Settings.EMPTY), s -> {});
+            new ElectMasterService(Settings.EMPTY), (s, r) -> {});
     }
 
     public void testSimpleJoinAccumulation() throws InterruptedException, ExecutionException {
diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
index 7eea2e24bd8..037d9f4174f 100644
--- a/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
+++ b/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
@@ -370,7 +370,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
             new NamedWriteableRegistry(ClusterModule.getNamedWriteables()),
             masterService, clusterApplier, clusterSettings, hostsResolver -> Collections.emptyList(),
             ESAllocationTestCase.createAllocationService(),
-            Collections.emptyList(), mock(GatewayMetaState.class), s -> {});
+            Collections.emptyList(), mock(GatewayMetaState.class), (s, r) -> {});
         zenDiscovery.start();
         return zenDiscovery;
     }
diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java
index e9e9a094c16..95ca2aacf13 100644
--- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java
+++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java
@@ -213,7 +213,7 @@ public class ClusterStateChanges {
             transportService, clusterService, threadPool, createIndexService, actionFilters, indexNameExpressionResolver);
 
         nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, logger);
-        joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, s -> {});
+        joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, (s, r) -> {});
     }
 
     public ClusterState createIndex(ClusterState state, CreateIndexRequest request) {
diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java
index 0ebc0402f96..28a706c7fca 100644
--- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java
+++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java
@@ -1065,7 +1065,7 @@ public class SnapshotResiliencyTests extends ESTestCase {
                 transportService, indicesService, actionFilters, indexNameExpressionResolver);
             final ShardStateAction shardStateAction = new ShardStateAction(
                 clusterService, transportService, allocationService,
-                new RoutingService(clusterService, allocationService),
+                new RoutingService(clusterService, allocationService::reroute),
                 threadPool
             );
             final MetaDataMappingService metaDataMappingService = new MetaDataMappingService(clusterService, indicesService);
@@ -1248,7 +1248,7 @@ public class SnapshotResiliencyTests extends ESTestCase {
                 hostsResolver -> testClusterNodes.nodes.values().stream().filter(n -> n.node.isMasterNode())
                     .map(n -> n.node.getAddress()).collect(Collectors.toList()),
                 clusterService.getClusterApplierService(), Collections.emptyList(), random(),
-                new RoutingService(clusterService, allocationService)::reroute, ElectionStrategy.DEFAULT_INSTANCE);
+                new RoutingService(clusterService, allocationService::reroute)::reroute, ElectionStrategy.DEFAULT_INSTANCE);
             masterService.setClusterStatePublisher(coordinator);
             coordinator.start();
             masterService.start();
diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java
index 8bdedaceba7..0c27f84d7f1 100644
--- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java
@@ -841,7 +841,7 @@ public class AbstractCoordinatorTestCase extends ESTestCase {
             final AllocationService allocationService = ESAllocationTestCase.createAllocationService(Settings.EMPTY);
             coordinator = new Coordinator("test_node", settings, clusterSettings, transportService, writableRegistry(),
                 allocationService, masterService, this::getPersistedState,
-                Cluster.this::provideSeedHosts, clusterApplierService, onJoinValidators, Randomness.get(), s -> {},
+                Cluster.this::provideSeedHosts, clusterApplierService, onJoinValidators, Randomness.get(), (s, r) -> {},
                 getElectionStrategy());
             masterService.setClusterStatePublisher(coordinator);
             final GatewayService gatewayService = new GatewayService(settings, allocationService, clusterService,

From 8f49d01113500b1f8e405fbe2142c0bb85547bed Mon Sep 17 00:00:00 2001
From: Dimitris Athanasiou
Date: Sun, 30 Jun 2019 19:37:00 +0300
Subject: [PATCH 42/42] [7.x][ML] Rename df-analytics `_id_copy` to `ml__id_copy` (#43754) (#43783)

Renames `_id_copy` to `ml__id_copy` as field names starting with an
underscore are deprecated. The new name `ml__id_copy` was chosen as a
field obscure enough that users won't already have it in their data.
In any case, this field is only intended to be used by df-analytics.
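For context, the id copy ties the reindex and extraction steps together
roughly as follows. This is an illustrative sketch only (the index names
and the bare `client` are hypothetical); the real wiring lives in
DataFrameAnalyticsManager and DataFrameDataExtractor below:

    import org.elasticsearch.index.reindex.ReindexRequest;
    import org.elasticsearch.script.Script;
    import org.elasticsearch.search.sort.SortOrder;

    // Copy each document's _id into the regular field ml__id_copy during
    // reindex; _id itself has no doc values and so cannot be used as an
    // index sort field.
    ReindexRequest reindexRequest = new ReindexRequest();
    reindexRequest.setSourceIndices("source-index");   // hypothetical name
    reindexRequest.setDestIndex("dest-index");         // hypothetical name
    reindexRequest.setScript(new Script("ctx._source.ml__id_copy = ctx._id"));

    // The extractor can then page through the destination index in a
    // stable order by sorting on the copied id:
    client.prepareSearch("dest-index")
        .addSort("ml__id_copy", SortOrder.ASC)
        .setSize(1000)
        .get();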
---
 .../integration/RunDataFrameAnalyticsIT.java   |  6 +++++
 .../dataframe/DataFrameAnalyticsFields.java    | 20 --------------
 .../ml/dataframe/DataFrameAnalyticsIndex.java  | 27 ++++++++++++-------
 .../dataframe/DataFrameAnalyticsManager.java   |  2 +-
 .../extractor/DataFrameDataExtractor.java      |  4 +--
 .../DataFrameAnalyticsIndexTests.java          |  4 +--
 .../DataFrameDataExtractorTests.java           |  2 +-
 7 files changed, 30 insertions(+), 35 deletions(-)
 delete mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsFields.java

diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java
index 5696edcf646..c7295ce24db 100644
--- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java
+++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java
@@ -91,7 +91,10 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTestCase {
             assertThat(destDoc.get(field), equalTo(sourceDoc.get(field)));
         }
         assertThat(destDoc.containsKey("ml"), is(true));
+
+        @SuppressWarnings("unchecked")
         Map<String, Object> resultsObject = (Map<String, Object>) destDoc.get("ml");
+
         assertThat(resultsObject.containsKey("outlier_score"), is(true));
         double outlierScore = (double) resultsObject.get("outlier_score");
         assertThat(outlierScore, allOf(greaterThanOrEqualTo(0.0), lessThanOrEqualTo(100.0)));
@@ -209,7 +212,10 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTestCase {
             assertThat(destDoc.get(field), equalTo(sourceDoc.get(field)));
         }
         assertThat(destDoc.containsKey("ml"), is(true));
+
+        @SuppressWarnings("unchecked")
         Map<String, Object> resultsObject = (Map<String, Object>) destDoc.get("ml");
+
        assertThat(resultsObject.containsKey("outlier_score"), is(true));
         double outlierScore = (double) resultsObject.get("outlier_score");
         assertThat(outlierScore, allOf(greaterThanOrEqualTo(0.0), lessThanOrEqualTo(100.0)));
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsFields.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsFields.java
deleted file mode 100644
index 4ade30ae68b..00000000000
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsFields.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License;
- * you may not use this file except in compliance with the Elastic License.
- */
-package org.elasticsearch.xpack.ml.dataframe;
-
-public final class DataFrameAnalyticsFields {
-
-    public static final String ID = "_id_copy";
-
-    // Metadata fields
-    static final String CREATION_DATE_MILLIS = "creation_date_in_millis";
-    static final String VERSION = "version";
-    static final String CREATED = "created";
-    static final String CREATED_BY = "created_by";
-    static final String ANALYTICS = "analytics";
-
-    private DataFrameAnalyticsFields() {}
-}
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndex.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndex.java
index e07eb99a3f5..8ae7db0a9fb 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndex.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndex.java
@@ -42,7 +42,16 @@ import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN;
 /**
  * {@link DataFrameAnalyticsIndex} class encapsulates logic for creating destination index based on source index metadata.
  */
-final class DataFrameAnalyticsIndex {
+public final class DataFrameAnalyticsIndex {
+
+    public static final String ID_COPY = "ml__id_copy";
+
+    // Metadata fields
+    static final String CREATION_DATE_MILLIS = "creation_date_in_millis";
+    static final String VERSION = "version";
+    static final String CREATED = "created";
+    static final String CREATED_BY = "created_by";
+    static final String ANALYTICS = "analytics";
 
     private static final String PROPERTIES = "properties";
     private static final String META = "_meta";
@@ -122,7 +131,7 @@ final class DataFrameAnalyticsIndex {
         Integer maxNumberOfReplicas = findMaxSettingValue(settingsResponse, IndexMetaData.SETTING_NUMBER_OF_REPLICAS);
 
         Settings.Builder settingsBuilder = Settings.builder();
-        settingsBuilder.put(IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), DataFrameAnalyticsFields.ID);
+        settingsBuilder.put(IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), ID_COPY);
         settingsBuilder.put(IndexSortConfig.INDEX_SORT_ORDER_SETTING.getKey(), SortOrder.ASC);
         if (maxNumberOfShards != null) {
             settingsBuilder.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, maxNumberOfShards);
@@ -151,17 +160,17 @@ final class DataFrameAnalyticsIndex {
         Map<String, Object> properties = getOrPutDefault(mappingsAsMap, PROPERTIES, HashMap::new);
         Map<String, Object> idCopyMapping = new HashMap<>();
         idCopyMapping.put("type", "keyword");
-        properties.put(DataFrameAnalyticsFields.ID, idCopyMapping);
+        properties.put(ID_COPY, idCopyMapping);
     }
 
     private static void addMetaData(Map<String, Object> mappingsAsMap, String analyticsId, Clock clock) {
         Map<String, Object> metadata = getOrPutDefault(mappingsAsMap, META, HashMap::new);
-        metadata.put(DataFrameAnalyticsFields.CREATION_DATE_MILLIS, clock.millis());
-        metadata.put(DataFrameAnalyticsFields.CREATED_BY, "data-frame-analytics");
+        metadata.put(CREATION_DATE_MILLIS, clock.millis());
+        metadata.put(CREATED_BY, "data-frame-analytics");
         Map<String, Object> versionMapping = new HashMap<>();
-        versionMapping.put(DataFrameAnalyticsFields.CREATED, Version.CURRENT);
-        metadata.put(DataFrameAnalyticsFields.VERSION, versionMapping);
-        metadata.put(DataFrameAnalyticsFields.ANALYTICS, analyticsId);
+        versionMapping.put(CREATED, Version.CURRENT);
+        metadata.put(VERSION, versionMapping);
+        metadata.put(ANALYTICS, analyticsId);
     }
 
     private static <K, V> V getOrPutDefault(Map<K, V> map, K key, Supplier<V> valueSupplier) {
@@ -182,7 +191,7 @@ final class DataFrameAnalyticsIndex {
         String type = mappings.keysIt().next();
 
         Map<String, Object> addedMappings = Collections.singletonMap(PROPERTIES,
-            Collections.singletonMap(DataFrameAnalyticsFields.ID, Collections.singletonMap("type", "keyword")));
+            Collections.singletonMap(ID_COPY, Collections.singletonMap("type", "keyword")));
 
         PutMappingRequest putMappingRequest = new PutMappingRequest(getIndexResponse.indices());
         putMappingRequest.type(type);
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java
index c7cfe2b6253..9132e0f8192 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java
@@ -150,7 +150,7 @@ public class DataFrameAnalyticsManager {
         reindexRequest.setSourceIndices(config.getSource().getIndex());
         reindexRequest.setSourceQuery(config.getSource().getParsedQuery());
         reindexRequest.setDestIndex(config.getDest().getIndex());
-        reindexRequest.setScript(new Script("ctx._source." + DataFrameAnalyticsFields.ID + " = ctx._id"));
+        reindexRequest.setScript(new Script("ctx._source." + DataFrameAnalyticsIndex.ID_COPY + " = ctx._id"));
 
         final ThreadContext threadContext = client.threadPool().getThreadContext();
         final Supplier<ThreadContext.StoredContext> supplier = threadContext.newRestorableContext(false);
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java
index 59cd78b4cc6..fa18f3bb25b 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java
@@ -23,7 +23,7 @@ import org.elasticsearch.search.fetch.StoredFieldsContext;
 import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.xpack.core.ClientHelper;
 import org.elasticsearch.xpack.ml.datafeed.extractor.fields.ExtractedField;
-import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsFields;
+import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsIndex;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -126,7 +126,7 @@ public class DataFrameDataExtractor {
             .setScroll(SCROLL_TIMEOUT)
             // This ensures the search throws if there are failures and the scroll context gets cleared automatically
             .setAllowPartialSearchResults(false)
-            .addSort(DataFrameAnalyticsFields.ID, SortOrder.ASC)
+            .addSort(DataFrameAnalyticsIndex.ID_COPY, SortOrder.ASC)
             .setIndices(context.indices)
             .setSize(context.scrollSize)
             .setQuery(context.query);
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndexTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndexTests.java
index 80391de519e..7079a3295bd 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndexTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndexTests.java
@@ -167,12 +167,12 @@ public class DataFrameAnalyticsIndexTests extends ESTestCase {
             containsInAnyOrder("index.number_of_shards", "index.number_of_replicas", "index.sort.field", "index.sort.order"));
         assertThat(createIndexRequest.settings().getAsInt("index.number_of_shards", -1), equalTo(5));
         assertThat(createIndexRequest.settings().getAsInt("index.number_of_replicas", -1), equalTo(1));
-        assertThat(createIndexRequest.settings().get("index.sort.field"), equalTo("_id_copy"));
+        assertThat(createIndexRequest.settings().get("index.sort.field"), equalTo("ml__id_copy"));
         assertThat(createIndexRequest.settings().get("index.sort.order"), equalTo("asc"));
 
         try (XContentParser parser = createParser(JsonXContent.jsonXContent, createIndexRequest.mappings().get("_doc"))) {
             Map<String, Object> map = parser.map();
-            assertThat(extractValue("_doc.properties._id_copy.type", map), equalTo("keyword"));
+            assertThat(extractValue("_doc.properties.ml__id_copy.type", map), equalTo("keyword"));
             assertThat(extractValue("_doc.properties.field_1", map), equalTo("field_1_mappings"));
             assertThat(extractValue("_doc.properties.field_2", map), equalTo("field_2_mappings"));
             assertThat(extractValue("_doc._meta.analytics", map), equalTo(ANALYTICS_ID));
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java
index 778b2826a72..71baa0bb94f 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java
@@ -127,7 +127,7 @@ public class DataFrameDataExtractorTests extends ESTestCase {
         assertThat(searchRequest, containsString("\"query\":{\"match_all\":{\"boost\":1.0}}"));
         assertThat(searchRequest, containsString("\"docvalue_fields\":[{\"field\":\"field_1\"},{\"field\":\"field_2\"}]"));
         assertThat(searchRequest, containsString("\"_source\":{\"includes\":[],\"excludes\":[]}"));
-        assertThat(searchRequest, containsString("\"sort\":[{\"_id_copy\":{\"order\":\"asc\"}}]"));
+        assertThat(searchRequest, containsString("\"sort\":[{\"ml__id_copy\":{\"order\":\"asc\"}}]"));
 
         // Check continue scroll requests had correct ids
         assertThat(dataExtractor.capturedContinueScrollIds.size(), equalTo(2));
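The assertions above pin down the shape of the destination index that
df-analytics creates: an index sorted on the keyword field `ml__id_copy`.
For reference, a minimal sketch of such a create-index request, assuming
the 7.x CreateIndexRequest API (the index name and the flat mapping layout
are illustrative, not the exact production code):

    import java.util.Collections;
    import java.util.Map;
    import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
    import org.elasticsearch.common.settings.Settings;

    // Destination index sorted on the keyword field ml__id_copy.
    Map<String, Object> properties = Collections.singletonMap("ml__id_copy",
        Collections.singletonMap("type", "keyword"));
    CreateIndexRequest createIndexRequest = new CreateIndexRequest("dest-index") // hypothetical name
        .settings(Settings.builder()
            .put("index.sort.field", "ml__id_copy")
            .put("index.sort.order", "asc"))
        .mapping("_doc", Collections.singletonMap("properties", properties));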