From 132ccbec2fdeebe77009d26cc93bd0709b7b8479 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 19 Dec 2018 09:49:23 +0100 Subject: [PATCH 01/20] [Docs] Extend common-grams-tokenfilter doctest example (#36807) Adding an example output using the "_analyze" API and expected response. --- .../common-grams-tokenfilter.asciidoc | 102 +++++++++++++++++- 1 file changed, 100 insertions(+), 2 deletions(-) diff --git a/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc index 3cf06dc6e88..361160ac6ac 100644 --- a/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc @@ -58,12 +58,12 @@ PUT /common_grams_example "filter": { "common_grams": { "type": "common_grams", - "common_words": ["a", "an", "the"] + "common_words": ["the", "is", "a"] }, "common_grams_query": { "type": "common_grams", "query_mode": true, - "common_words": ["a", "an", "the"] + "common_words": ["the", "is", "a"] } } } @@ -71,3 +71,101 @@ PUT /common_grams_example } -------------------------------------------------- // CONSOLE + +You can see the output by using e.g. the `_analyze` endpoint: + +[source,js] +-------------------------------------------------- +POST /common_grams_example/_analyze +{ + "analyzer" : "index_grams", + "text" : "the quick brown is a fox" +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +And the response will be: + +[source,js] +-------------------------------------------------- +{ + "tokens" : [ + { + "token" : "the", + "start_offset" : 0, + "end_offset" : 3, + "type" : "word", + "position" : 0 + }, + { + "token" : "the_quick", + "start_offset" : 0, + "end_offset" : 9, + "type" : "gram", + "position" : 0, + "positionLength" : 2 + }, + { + "token" : "quick", + "start_offset" : 4, + "end_offset" : 9, + "type" : "word", + "position" : 1 + }, + { + "token" : "brown", + "start_offset" : 10, + "end_offset" : 15, + "type" : "word", + "position" : 2 + }, + { + "token" : "brown_is", + "start_offset" : 10, + "end_offset" : 18, + "type" : "gram", + "position" : 2, + "positionLength" : 2 + }, + { + "token" : "is", + "start_offset" : 16, + "end_offset" : 18, + "type" : "word", + "position" : 3 + }, + { + "token" : "is_a", + "start_offset" : 16, + "end_offset" : 20, + "type" : "gram", + "position" : 3, + "positionLength" : 2 + }, + { + "token" : "a", + "start_offset" : 19, + "end_offset" : 20, + "type" : "word", + "position" : 4 + }, + { + "token" : "a_fox", + "start_offset" : 19, + "end_offset" : 24, + "type" : "gram", + "position" : 4, + "positionLength" : 2 + }, + { + "token" : "fox", + "start_offset" : 21, + "end_offset" : 24, + "type" : "word", + "position" : 5 + } + ] +} +-------------------------------------------------- +// TESTRESPONSE \ No newline at end of file From dd540ef6180b8838c79aad31efc69e4ad1e2b5f8 Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Wed, 19 Dec 2018 08:55:05 +0000 Subject: [PATCH 02/20] Use index-prefix fields for terms of length min_chars - 1 (#36703) The default index_prefix settings will index prefixes of between 2 and 5 characters in length. Currently, if a prefix search falls outside of this range at either end we fall back to a standard prefix expansion, which is still very expensive for single character prefixes. 
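To make the cost difference concrete before the details below, here is a minimal Lucene sketch of the standard expansion versus the wildcard remapping described in the next paragraph (the field names `body` and `body._index_prefix` and `min_chars = 2` are illustrative assumptions, not part of the patch):

```java
import java.util.Arrays;

import org.apache.lucene.index.Term;
import org.apache.lucene.search.AutomatonQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.automaton.Automata;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.Operations;

public class PrefixRemapSketch {
    public static void main(String[] args) {
        // Standard expansion: visits every term of the main field that starts
        // with "a", which can be a large fraction of the whole index.
        Query expensive = new PrefixQuery(new Term("body", "a"));

        // Remapped expansion: "a" followed by exactly one arbitrary character,
        // run against the much smaller _index_prefix field.
        Automaton automaton = Operations.concatenate(Arrays.asList(
                Automata.makeString("a"), Automata.makeAnyChar()));
        Query remapped = new AutomatonQuery(new Term("body._index_prefix", "a*"), automaton);

        System.out.println(expensive + " -> " + remapped);
    }
}
```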
However, we have an option here to use a wildcard expansion rather than a prefix expansion, so that a query of a* gets remapped to a? against the _index_prefix field - likely to be a very small set of terms, and certain to be much smaller than a* against the whole index. This commit adds this extra level of mapping for any prefix term whose length is one less than the min_chars parameter of the index_prefixes field. --- .../test/search/190_index_prefix_search.yml | 15 +++++++ .../index/mapper/TextFieldMapper.java | 25 ++++++++++-- .../index/mapper/TextFieldMapperTests.java | 39 +++---------------- .../index/mapper/TextFieldTypeTests.java | 26 +++++++++++++ 4 files changed, 68 insertions(+), 37 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml index 016bb7c6046..7b89dd620a9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml @@ -58,6 +58,21 @@ setup: - match: {hits.max_score: 2} - match: {hits.hits.0._score: 2} + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + query_string: + default_field: text + query: s* + boost: 2 + + - match: {hits.total: 1} + - match: {hits.max_score: 2} + - match: {hits.hits.0._score: 2} + - do: search: rest_total_hits_as_int: true diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 5987e167dc9..8a9c141b0af 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -32,6 +32,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; +import org.apache.lucene.search.AutomatonQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.MultiTermQuery; @@ -40,6 +41,9 @@ import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.intervals.IntervalsSource; +import org.apache.lucene.util.automaton.Automata; +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.Operations; import org.elasticsearch.Version; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.settings.Settings; @@ -360,7 +364,7 @@ public class TextFieldMapper extends FieldMapper { } boolean accept(int length) { - return length >= minChars && length <= maxChars; + return length >= minChars - 1 && length <= maxChars; } void doXContent(XContentBuilder builder) throws IOException { @@ -370,6 +374,22 @@ public class TextFieldMapper extends FieldMapper { builder.endObject(); } + @Override + public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, QueryShardContext context) { + if (value.length() >= minChars) { + return super.termQuery(value, context); + } + List automata = new ArrayList<>(); + automata.add(Automata.makeString(value)); + for (int i = value.length(); i < minChars; i++) { + automata.add(Automata.makeAnyChar()); + } + Automaton automaton = Operations.concatenate(automata); + 
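+            // The automaton accepts the supplied prefix followed by enough arbitrary
+            // characters to reach min_chars, i.e. the shortest terms that are
+            // actually stored in the _index_prefix field.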
AutomatonQuery query = new AutomatonQuery(new Term(name(), value + "*"), automaton); + query.setRewriteMethod(method); + return query; + } + @Override public PrefixFieldType clone() { return new PrefixFieldType(name(), minChars, maxChars); @@ -402,7 +422,6 @@ public class TextFieldMapper extends FieldMapper { @Override public int hashCode() { - return Objects.hash(super.hashCode(), minChars, maxChars); } } @@ -564,7 +583,7 @@ public class TextFieldMapper extends FieldMapper { if (prefixFieldType == null || prefixFieldType.accept(value.length()) == false) { return super.prefixQuery(value, method, context); } - Query tq = prefixFieldType.termQuery(value, context); + Query tq = prefixFieldType.prefixQuery(value, method, context); if (method == null || method == MultiTermQuery.CONSTANT_SCORE_REWRITE || method == MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE) { return new ConstantScoreQuery(tq); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index 56caa94466c..f149620b028 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -31,10 +31,8 @@ import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermsEnum; -import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; -import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; @@ -71,10 +69,8 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; -import static org.apache.lucene.search.MultiTermQuery.CONSTANT_SCORE_REWRITE; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.core.Is.is; public class TextFieldMapperTests extends ESSingleNodeTestCase { @@ -817,18 +813,13 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase { public void testIndexPrefixMapping() throws IOException { - QueryShardContext queryShardContext = indexService.newQueryShardContext( - randomInt(20), null, () -> { - throw new UnsupportedOperationException(); - }, null); - { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") .field("type", "text") .field("analyzer", "standard") .startObject("index_prefixes") - .field("min_chars", 1) + .field("min_chars", 2) .field("max_chars", 10) .endObject() .endObject().endObject() @@ -837,16 +828,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase { DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); - assertThat(mapper.mappers().getMapper("field._index_prefix").toString(), containsString("prefixChars=1:10")); - - FieldMapper fieldMapper = (FieldMapper) mapper.mappers().getMapper("field"); - MappedFieldType fieldType = fieldMapper.fieldType; - - Query q = fieldType.prefixQuery("goin", CONSTANT_SCORE_REWRITE, queryShardContext); - - assertEquals(new ConstantScoreQuery(new TermQuery(new Term("field._index_prefix", "goin"))), q); - q = 
fieldType.prefixQuery("internationalisatio", CONSTANT_SCORE_REWRITE, queryShardContext); - assertEquals(new PrefixQuery(new Term("field", "internationalisatio")), q); + assertThat(mapper.mappers().getMapper("field._index_prefix").toString(), containsString("prefixChars=2:10")); ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference .bytes(XContentFactory.jsonBuilder() @@ -870,17 +852,8 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase { CompressedXContent json = new CompressedXContent(mapping); DocumentMapper mapper = parser.parse("type", json); - FieldMapper fieldMapper = (FieldMapper) mapper.mappers().getMapper("field"); - MappedFieldType fieldType = fieldMapper.fieldType; + assertThat(mapper.mappers().getMapper("field._index_prefix").toString(), containsString("prefixChars=2:5")); - Query q1 = fieldType.prefixQuery("g", CONSTANT_SCORE_REWRITE, queryShardContext); - assertThat(q1, instanceOf(PrefixQuery.class)); - Query q2 = fieldType.prefixQuery("go", CONSTANT_SCORE_REWRITE, queryShardContext); - assertThat(q2, instanceOf(ConstantScoreQuery.class)); - Query q5 = fieldType.prefixQuery("going", CONSTANT_SCORE_REWRITE, queryShardContext); - assertThat(q5, instanceOf(ConstantScoreQuery.class)); - Query q6 = fieldType.prefixQuery("goings", CONSTANT_SCORE_REWRITE, queryShardContext); - assertThat(q6, instanceOf(PrefixQuery.class)); } { @@ -898,10 +871,8 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase { .endObject().endObject() .endObject().endObject()); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { - indexService.mapperService() - .merge("type", new CompressedXContent(illegalMapping), MergeReason.MAPPING_UPDATE); - }); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + indexService.mapperService().merge("type", new CompressedXContent(illegalMapping), MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), containsString("Field [field._index_prefix] is defined twice in [type]")); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java index 877553bacf9..2af659b6e20 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java @@ -20,11 +20,18 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; +import org.apache.lucene.search.AutomatonQuery; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.PrefixQuery; +import org.apache.lucene.search.Query; import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.automaton.Automata; +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.Operations; import org.elasticsearch.common.unit.Fuzziness; import org.junit.Before; @@ -32,6 +39,8 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import static org.apache.lucene.search.MultiTermQuery.CONSTANT_SCORE_REWRITE; + public class TextFieldTypeTests extends FieldTypeTestCase { @Override protected MappedFieldType createDefaultFieldType() { @@ -143,4 +152,21 @@ public class TextFieldTypeTests 
extends FieldTypeTestCase { () -> ft.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true)); assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); } + + public void testIndexPrefixes() { + TextFieldMapper.TextFieldType ft = new TextFieldMapper.TextFieldType(); + ft.setName("field"); + ft.setPrefixFieldType(new TextFieldMapper.PrefixFieldType("field._index_prefix", 2, 10)); + + Query q = ft.prefixQuery("goin", CONSTANT_SCORE_REWRITE, null); + assertEquals(new ConstantScoreQuery(new TermQuery(new Term("field._index_prefix", "goin"))), q); + + q = ft.prefixQuery("internationalisatio", CONSTANT_SCORE_REWRITE, null); + assertEquals(new PrefixQuery(new Term("field", "internationalisatio")), q); + + q = ft.prefixQuery("g", CONSTANT_SCORE_REWRITE, null); + Automaton automaton + = Operations.concatenate(Arrays.asList(Automata.makeChar('g'), Automata.makeAnyChar())); + assertEquals(new ConstantScoreQuery(new AutomatonQuery(new Term("field._index_prefix", "g*"), automaton)), q); + } } From d2ce576c8cfd43aedabd0bf5c695d8745156c69f Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Wed, 19 Dec 2018 10:47:17 +0100 Subject: [PATCH 03/20] Use SearchRequest copy constructor in ExpandSearchPhase (#36772) Relates to #36641 --- .../action/search/ExpandSearchPhase.java | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java index 917ff06c573..da481b7a4a8 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java @@ -91,7 +91,8 @@ final class ExpandSearchPhase extends SearchPhase { SearchSourceBuilder sourceBuilder = buildExpandSearchSourceBuilder(innerHitBuilder, innerCollapseBuilder) .query(groupQuery) .postFilter(searchRequest.source().postFilter()); - SearchRequest groupRequest = buildExpandSearchRequest(searchRequest, sourceBuilder); + SearchRequest groupRequest = new SearchRequest(searchRequest); + groupRequest.source(sourceBuilder); multiRequest.add(groupRequest); } } @@ -120,22 +121,6 @@ final class ExpandSearchPhase extends SearchPhase { } } - private SearchRequest buildExpandSearchRequest(SearchRequest orig, SearchSourceBuilder sourceBuilder) { - SearchRequest groupRequest = new SearchRequest(orig.indices()) - .types(orig.types()) - .source(sourceBuilder) - .indicesOptions(orig.indicesOptions()) - .requestCache(orig.requestCache()) - .preference(orig.preference()) - .routing(orig.routing()) - .searchType(orig.searchType()); - if (orig.allowPartialSearchResults() != null){ - groupRequest.allowPartialSearchResults(orig.allowPartialSearchResults()); - } - groupRequest.setMaxConcurrentShardRequests(orig.getMaxConcurrentShardRequests()); - return groupRequest; - } - private SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder options, CollapseBuilder innerCollapseBuilder) { SearchSourceBuilder groupSource = new SearchSourceBuilder(); groupSource.from(options.getFrom()); From 1345dff5076a57981df40cea01b4c3a7f33ea08b Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Wed, 19 Dec 2018 11:29:21 +0100 Subject: [PATCH 04/20] Fix line length in org.elasticsearch.snapshots (#36646) Remove the line length suppression for this package and fix offending lines in both main and test relates #34884 --- .../resources/checkstyle_suppressions.xml | 4 - 
.../elasticsearch/snapshots/RestoreInfo.java | 3 +- .../snapshots/RestoreService.java | 92 +++++++++++++------ .../elasticsearch/snapshots/SnapshotInfo.java | 13 ++- .../snapshots/SnapshotShardsService.java | 32 ++++--- .../snapshots/SnapshotsService.java | 88 ++++++++++++------ .../DedicatedClusterSnapshotRestoreIT.java | 19 +++- .../SharedClusterSnapshotRestoreIT.java | 9 +- .../snapshots/mockstore/MockRepository.java | 3 +- 9 files changed, 173 insertions(+), 90 deletions(-) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 1e8b6948ea6..b74f2b71e39 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -56,10 +56,6 @@ - - - - diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java index 15d5d1ca268..bc87c49dcca 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java @@ -142,7 +142,8 @@ public class RestoreInfo implements ToXContentObject, Streamable { return builder; } - private static final ObjectParser PARSER = new ObjectParser<>(RestoreInfo.class.getName(), true, RestoreInfo::new); + private static final ObjectParser PARSER = new ObjectParser<>(RestoreInfo.class.getName(), + true, RestoreInfo::new); static { ObjectParser shardsParser = new ObjectParser<>("shards", true, null); diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 4c6090758dd..eecac92d63e 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -103,8 +103,8 @@ import static org.elasticsearch.snapshots.SnapshotUtils.filterIndices; * First {@link #restoreSnapshot(RestoreRequest, org.elasticsearch.action.ActionListener)} * method reads information about snapshot and metadata from repository. In update cluster state task it checks restore * preconditions, restores global state if needed, creates {@link RestoreInProgress} record with list of shards that needs - * to be restored and adds this shard to the routing table using {@link RoutingTable.Builder#addAsRestore(IndexMetaData, SnapshotRecoverySource)} - * method. + * to be restored and adds this shard to the routing table using + * {@link RoutingTable.Builder#addAsRestore(IndexMetaData, SnapshotRecoverySource)} method. *
<p>
* Individual shards are getting restored as part of normal recovery process in * {@link IndexShard#restoreFromRepository(Repository)} )} @@ -256,15 +256,18 @@ public class RestoreService implements ClusterStateApplier { for (Map.Entry indexEntry : indices.entrySet()) { String index = indexEntry.getValue(); boolean partial = checkPartial(index); - SnapshotRecoverySource recoverySource = new SnapshotRecoverySource(restoreUUID, snapshot, snapshotInfo.version(), index); + SnapshotRecoverySource recoverySource = + new SnapshotRecoverySource(restoreUUID, snapshot, snapshotInfo.version(), index); String renamedIndexName = indexEntry.getKey(); IndexMetaData snapshotIndexMetaData = metaData.index(index); - snapshotIndexMetaData = updateIndexSettings(snapshotIndexMetaData, request.indexSettings, request.ignoreIndexSettings); + snapshotIndexMetaData = updateIndexSettings(snapshotIndexMetaData, + request.indexSettings, request.ignoreIndexSettings); try { snapshotIndexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(snapshotIndexMetaData, minIndexCompatibilityVersion); } catch (Exception ex) { - throw new SnapshotRestoreException(snapshot, "cannot restore index [" + index + "] because it cannot be upgraded", ex); + throw new SnapshotRestoreException(snapshot, "cannot restore index [" + index + "] because it cannot be " + + "upgraded", ex); } // Check that the index is closed or doesn't exist IndexMetaData currentIndexMetaData = currentState.metaData().index(renamedIndexName); @@ -274,9 +277,16 @@ public class RestoreService implements ClusterStateApplier { // Index doesn't exist - create it and start recovery // Make sure that the index we are about to create has a validate name MetaDataCreateIndexService.validateIndexName(renamedIndexName, currentState); - createIndexService.validateIndexSettings(renamedIndexName, snapshotIndexMetaData.getSettings(), currentState, false); - IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN).index(renamedIndexName); - indexMdBuilder.settings(Settings.builder().put(snapshotIndexMetaData.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())); + createIndexService.validateIndexSettings(renamedIndexName, + snapshotIndexMetaData.getSettings(), + currentState, + false); + IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData) + .state(IndexMetaData.State.OPEN) + .index(renamedIndexName); + indexMdBuilder.settings(Settings.builder() + .put(snapshotIndexMetaData.getSettings()) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())); if (!request.includeAliases() && !snapshotIndexMetaData.getAliases().isEmpty()) { // Remove all aliases - they shouldn't be restored indexMdBuilder.removeAllAliases(); @@ -296,10 +306,13 @@ public class RestoreService implements ClusterStateApplier { } else { validateExistingIndex(currentIndexMetaData, snapshotIndexMetaData, renamedIndexName, partial); // Index exists and it's closed - open it in metadata and start recovery - IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN); + IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData) + .state(IndexMetaData.State.OPEN); indexMdBuilder.version(Math.max(snapshotIndexMetaData.getVersion(), currentIndexMetaData.getVersion() + 1)); - indexMdBuilder.mappingVersion(Math.max(snapshotIndexMetaData.getMappingVersion(), currentIndexMetaData.getMappingVersion() + 1)); - 
indexMdBuilder.settingsVersion(Math.max(snapshotIndexMetaData.getSettingsVersion(), currentIndexMetaData.getSettingsVersion() + 1)); + indexMdBuilder.mappingVersion(Math.max(snapshotIndexMetaData.getMappingVersion(), + currentIndexMetaData.getMappingVersion() + 1)); + indexMdBuilder.settingsVersion(Math.max(snapshotIndexMetaData.getSettingsVersion(), + currentIndexMetaData.getSettingsVersion() + 1)); if (!request.includeAliases()) { // Remove all snapshot aliases if (!snapshotIndexMetaData.getAliases().isEmpty()) { @@ -314,7 +327,10 @@ public class RestoreService implements ClusterStateApplier { aliases.add(alias.value); } } - indexMdBuilder.settings(Settings.builder().put(snapshotIndexMetaData.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, currentIndexMetaData.getIndexUUID())); + indexMdBuilder.settings(Settings.builder() + .put(snapshotIndexMetaData.getSettings()) + .put(IndexMetaData.SETTING_INDEX_UUID, + currentIndexMetaData.getIndexUUID())); IndexMetaData updatedIndexMetaData = indexMdBuilder.index(renamedIndexName).build(); rtBuilder.addAsRestore(updatedIndexMetaData, recoverySource); blocks.updateBlocks(updatedIndexMetaData); @@ -324,9 +340,12 @@ public class RestoreService implements ClusterStateApplier { for (int shard = 0; shard < snapshotIndexMetaData.getNumberOfShards(); shard++) { if (!ignoreShards.contains(shard)) { - shardsBuilder.put(new ShardId(renamedIndex, shard), new RestoreInProgress.ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId())); + shardsBuilder.put(new ShardId(renamedIndex, shard), + new RestoreInProgress.ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId())); } else { - shardsBuilder.put(new ShardId(renamedIndex, shard), new RestoreInProgress.ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId(), RestoreInProgress.State.FAILURE)); + shardsBuilder.put(new ShardId(renamedIndex, shard), + new RestoreInProgress.ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId(), + RestoreInProgress.State.FAILURE)); } } } @@ -390,7 +409,9 @@ public class RestoreService implements ClusterStateApplier { private void checkAliasNameConflicts(Map renamedIndices, Set aliases) { for (Map.Entry renamedIndex : renamedIndices.entrySet()) { if (aliases.contains(renamedIndex.getKey())) { - throw new SnapshotRestoreException(snapshot, "cannot rename index [" + renamedIndex.getValue() + "] into [" + renamedIndex.getKey() + "] because of conflict with an alias with the same name"); + throw new SnapshotRestoreException(snapshot, + "cannot rename index [" + renamedIndex.getValue() + "] into [" + renamedIndex.getKey() + "] because of " + + "conflict with an alias with the same name"); } } } @@ -409,28 +430,34 @@ public class RestoreService implements ClusterStateApplier { if (request.partial()) { return true; } else { - throw new SnapshotRestoreException(snapshot, "index [" + index + "] wasn't fully snapshotted - cannot restore"); + throw new SnapshotRestoreException(snapshot, "index [" + index + "] wasn't fully snapshotted - cannot " + + "restore"); } } else { return false; } } - private void validateExistingIndex(IndexMetaData currentIndexMetaData, IndexMetaData snapshotIndexMetaData, String renamedIndex, boolean partial) { + private void validateExistingIndex(IndexMetaData currentIndexMetaData, IndexMetaData snapshotIndexMetaData, + String renamedIndex, boolean partial) { // Index exist - checking that it's closed if (currentIndexMetaData.getState() != IndexMetaData.State.CLOSE) { // TODO: Enable restore for open indices - throw new 
SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] because an open index with same name already exists in the cluster. " + - "Either close or delete the existing index or restore the index under a different name by providing a rename pattern and replacement name"); + throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] because an open index " + + "with same name already exists in the cluster. Either close or delete the existing index or restore the " + + "index under a different name by providing a rename pattern and replacement name"); } // Index exist - checking if it's partial restore if (partial) { - throw new SnapshotRestoreException(snapshot, "cannot restore partial index [" + renamedIndex + "] because such index already exists"); + throw new SnapshotRestoreException(snapshot, "cannot restore partial index [" + renamedIndex + "] because such " + + "index already exists"); } // Make sure that the number of shards is the same. That's the only thing that we cannot change if (currentIndexMetaData.getNumberOfShards() != snapshotIndexMetaData.getNumberOfShards()) { - throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] with [" + currentIndexMetaData.getNumberOfShards() + - "] shards from a snapshot of index [" + snapshotIndexMetaData.getIndex().getName() + "] with [" + snapshotIndexMetaData.getNumberOfShards() + "] shards"); + throw new SnapshotRestoreException(snapshot, + "cannot restore index [" + renamedIndex + "] with [" + currentIndexMetaData.getNumberOfShards() + "] shards " + + "from a snapshot of index [" + snapshotIndexMetaData.getIndex().getName() + "] with [" + + snapshotIndexMetaData.getNumberOfShards() + "] shards"); } } @@ -442,7 +469,10 @@ public class RestoreService implements ClusterStateApplier { if (changeSettings.names().isEmpty() && ignoreSettings.length == 0) { return indexMetaData; } - Settings normalizedChangeSettings = Settings.builder().put(changeSettings).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build(); + Settings normalizedChangeSettings = Settings.builder() + .put(changeSettings) + .normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX) + .build(); IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData); Settings settings = indexMetaData.getSettings(); Set keyFilters = new HashSet<>(); @@ -504,7 +534,8 @@ public class RestoreService implements ClusterStateApplier { } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}] failed to restore snapshot", request.repositoryName + ":" + request.snapshotName), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to restore snapshot", + request.repositoryName + ":" + request.snapshotName), e); listener.onFailure(e); } } @@ -521,12 +552,14 @@ public class RestoreService implements ClusterStateApplier { if (shardsBuilder == null) { shardsBuilder = ImmutableOpenMap.builder(entry.shards()); } - shardsBuilder.put(shardId, new ShardRestoreStatus(null, RestoreInProgress.State.FAILURE, "index was deleted")); + shardsBuilder.put(shardId, + new ShardRestoreStatus(null, RestoreInProgress.State.FAILURE, "index was deleted")); } } if (shardsBuilder != null) { ImmutableOpenMap shards = shardsBuilder.build(); - builder.add(new RestoreInProgress.Entry(entry.uuid(), entry.snapshot(), overallState(RestoreInProgress.State.STARTED, shards), entry.indices(), shards)); + builder.add(new RestoreInProgress.Entry(entry.uuid(), entry.snapshot(), + overallState(RestoreInProgress.State.STARTED, 
shards), entry.indices(), shards)); } else { builder.add(entry); } @@ -602,8 +635,8 @@ public class RestoreService implements ClusterStateApplier { initializedShard.recoverySource().getType() != RecoverySource.Type.SNAPSHOT) { changes(unassignedShard.recoverySource()).shards.put( unassignedShard.shardId(), - new ShardRestoreStatus(null, - RestoreInProgress.State.FAILURE, "recovery source type changed from snapshot to " + initializedShard.recoverySource()) + new ShardRestoreStatus(null, RestoreInProgress.State.FAILURE, + "recovery source type changed from snapshot to " + initializedShard.recoverySource()) ); } } @@ -672,7 +705,8 @@ public class RestoreService implements ClusterStateApplier { return null; } - static class CleanRestoreStateTaskExecutor implements ClusterStateTaskExecutor, ClusterStateTaskListener { + static class CleanRestoreStateTaskExecutor implements ClusterStateTaskExecutor, + ClusterStateTaskListener { static class Task { final String uuid; diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java index b512a570a85..c1692f60617 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java @@ -232,16 +232,18 @@ public final class SnapshotInfo implements Comparable, ToXContent, private final List shardFailures; public SnapshotInfo(SnapshotId snapshotId, List indices, SnapshotState state) { - this(snapshotId, indices, state, null, null, 0L, 0L, 0, 0, Collections.emptyList(), null); + this(snapshotId, indices, state, null, null, 0L, 0L, 0, 0, + Collections.emptyList(), null); } public SnapshotInfo(SnapshotId snapshotId, List indices, SnapshotState state, Version version) { - this(snapshotId, indices, state, null, version, 0L, 0L, 0, 0, Collections.emptyList(), null); + this(snapshotId, indices, state, null, version, 0L, 0L, 0, 0, + Collections.emptyList(), null); } public SnapshotInfo(SnapshotId snapshotId, List indices, long startTime, Boolean includeGlobalState) { - this(snapshotId, indices, SnapshotState.IN_PROGRESS, null, Version.CURRENT, startTime, 0L, 0, 0, - Collections.emptyList(), includeGlobalState); + this(snapshotId, indices, SnapshotState.IN_PROGRESS, null, Version.CURRENT, startTime, 0L, + 0, 0, Collections.emptyList(), includeGlobalState); } public SnapshotInfo(SnapshotId snapshotId, List indices, long startTime, String reason, long endTime, @@ -306,7 +308,8 @@ public final class SnapshotInfo implements Comparable, ToXContent, public static SnapshotInfo incompatible(SnapshotId snapshotId) { return new SnapshotInfo(snapshotId, Collections.emptyList(), SnapshotState.INCOMPATIBLE, "the snapshot is incompatible with the current version of Elasticsearch and its exact version is unknown", - null, 0L, 0L, 0, 0, Collections.emptyList(), null); + null, 0L, 0L, 0, 0, + Collections.emptyList(), null); } /** diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 7e590bc4104..40c89f10ccb 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -116,8 +116,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements private final UpdateSnapshotStatusAction updateSnapshotStatusHandler; @Inject - public SnapshotShardsService(Settings 
settings, ClusterService clusterService, SnapshotsService snapshotsService, ThreadPool threadPool, - TransportService transportService, IndicesService indicesService, + public SnapshotShardsService(Settings settings, ClusterService clusterService, SnapshotsService snapshotsService, + ThreadPool threadPool, TransportService transportService, IndicesService indicesService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { super(settings); this.indicesService = indicesService; @@ -188,7 +188,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements for (Map.Entry> snapshotShards : snapshotShardsMap.entrySet()) { Map shards = snapshotShards.getValue(); if (shards.containsKey(shardId)) { - logger.debug("[{}] shard closing, abort snapshotting for snapshot [{}]", shardId, snapshotShards.getKey().getSnapshotId()); + logger.debug("[{}] shard closing, abort snapshotting for snapshot [{}]", + shardId, snapshotShards.getKey().getSnapshotId()); shards.get(shardId).abortIfNotCompleted("shard is closing, aborting"); } } @@ -337,7 +338,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements @Override public void onFailure(Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard", shardId, snapshot), e); + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard", + shardId, snapshot), e); failure.set(e); } @@ -367,7 +369,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements * @param snapshot snapshot * @param snapshotStatus snapshot status */ - private void snapshot(final IndexShard indexShard, final Snapshot snapshot, final IndexId indexId, final IndexShardSnapshotStatus snapshotStatus) { + private void snapshot(final IndexShard indexShard, final Snapshot snapshot, final IndexId indexId, + final IndexShardSnapshotStatus snapshotStatus) { final ShardId shardId = indexShard.shardId(); if (indexShard.routingEntry().primary() == false) { throw new IndexShardSnapshotFailedException(shardId, "snapshot should be performed only on primary"); @@ -526,7 +529,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements * * @param request update shard status request */ - private void innerUpdateSnapshotState(final UpdateIndexShardSnapshotStatusRequest request, ActionListener listener) { + private void innerUpdateSnapshotState(final UpdateIndexShardSnapshotStatusRequest request, + ActionListener listener) { logger.trace("received updated snapshot restore state [{}]", request); clusterService.submitStateUpdateTask( "update snapshot state", @@ -549,7 +553,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements class SnapshotStateExecutor implements ClusterStateTaskExecutor { @Override - public ClusterTasksResult execute(ClusterState currentState, List tasks) throws Exception { + public ClusterTasksResult + execute(ClusterState currentState, List tasks) throws Exception { final SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); if (snapshots != null) { int changedCount = 0; @@ -560,7 +565,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements for (UpdateIndexShardSnapshotStatusRequest updateSnapshotState : tasks) { if (entry.snapshot().equals(updateSnapshotState.snapshot())) { - logger.trace("[{}] Updating shard [{}] with status [{}]", updateSnapshotState.snapshot(), updateSnapshotState.shardId(), 
updateSnapshotState.status().state()); + logger.trace("[{}] Updating shard [{}] with status [{}]", updateSnapshotState.snapshot(), + updateSnapshotState.shardId(), updateSnapshotState.status().state()); if (updated == false) { shards.putAll(entry.shards()); updated = true; @@ -588,7 +594,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements if (changedCount > 0) { logger.trace("changed cluster state triggered by {} snapshot state updates", changedCount); - final SnapshotsInProgress updatedSnapshots = new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()])); + final SnapshotsInProgress updatedSnapshots = + new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()])); return ClusterTasksResult.builder().successes(tasks).build( ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, updatedSnapshots).build()); } @@ -606,8 +613,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements UpdateSnapshotStatusAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { super( - SnapshotShardsService.UPDATE_SNAPSHOT_STATUS_ACTION_NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, UpdateIndexShardSnapshotStatusRequest::new + SnapshotShardsService.UPDATE_SNAPSHOT_STATUS_ACTION_NAME, transportService, clusterService, threadPool, + actionFilters, indexNameExpressionResolver, UpdateIndexShardSnapshotStatusRequest::new ); } @@ -622,7 +629,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements } @Override - protected void masterOperation(UpdateIndexShardSnapshotStatusRequest request, ClusterState state, ActionListener listener) throws Exception { + protected void masterOperation(UpdateIndexShardSnapshotStatusRequest request, ClusterState state, + ActionListener listener) throws Exception { innerUpdateSnapshotState(request, listener); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index fa7c757aaca..8c505d20d17 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -92,16 +92,20 @@ import static org.elasticsearch.cluster.SnapshotsInProgress.completed; *

 * A typical snapshot creating process looks like this:
 * <ul>
- * <li>On the master node the {@link #createSnapshot(SnapshotRequest, CreateSnapshotListener)} is called and makes sure that no snapshots is currently running
- * and registers the new snapshot in cluster state</li>
- * <li>When cluster state is updated the {@link #beginSnapshot(ClusterState, SnapshotsInProgress.Entry, boolean, CreateSnapshotListener)} method
- * kicks in and initializes the snapshot in the repository and then populates list of shards that needs to be snapshotted in cluster state</li>
+ * <li>On the master node the {@link #createSnapshot(SnapshotRequest, CreateSnapshotListener)} is called and makes sure that no snapshots
+ * is currently running and registers the new snapshot in cluster state</li>
+ * <li>When cluster state is updated
+ * the {@link #beginSnapshot(ClusterState, SnapshotsInProgress.Entry, boolean, CreateSnapshotListener)} method kicks in and initializes
+ * the snapshot in the repository and then populates list of shards that needs to be snapshotted in cluster state</li>
 * <li>Each data node is watching for these shards and when new shards scheduled for snapshotting appear in the cluster state, data nodes
 * start processing them through {@link SnapshotShardsService#processIndexShardSnapshots(ClusterChangedEvent)} method</li>
- * <li>Once shard snapshot is created data node updates state of the shard in the cluster state using the {@link SnapshotShardsService#sendSnapshotShardUpdate(Snapshot, ShardId, ShardSnapshotStatus)} method</li>
- * <li>When last shard is completed master node in {@link SnapshotShardsService#innerUpdateSnapshotState} method marks the snapshot as completed</li>
+ * <li>Once shard snapshot is created data node updates state of the shard in the cluster state using
+ * the {@link SnapshotShardsService#sendSnapshotShardUpdate(Snapshot, ShardId, ShardSnapshotStatus)} method</li>
+ * <li>When last shard is completed master node in {@link SnapshotShardsService#innerUpdateSnapshotState} method marks the snapshot
+ * as completed</li>
 * <li>After cluster state is updated, the {@link #endSnapshot(SnapshotsInProgress.Entry)} finalizes snapshot in the repository,
- * notifies all {@link #snapshotCompletionListeners} that snapshot is completed, and finally calls {@link #removeSnapshotFromClusterState(Snapshot, SnapshotInfo, Exception)} to remove snapshot from cluster state</li>
+ * notifies all {@link #snapshotCompletionListeners} that snapshot is completed, and finally calls
+ * {@link #removeSnapshotFromClusterState(Snapshot, SnapshotInfo, Exception)} to remove snapshot from cluster state</li>
 * </ul>
*/ public class SnapshotsService extends AbstractLifecycleComponent implements ClusterStateApplier { @@ -118,7 +122,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus private final CopyOnWriteArrayList snapshotCompletionListeners = new CopyOnWriteArrayList<>(); @Inject - public SnapshotsService(Settings settings, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, RepositoriesService repositoriesService, ThreadPool threadPool) { + public SnapshotsService(Settings settings, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, + RepositoriesService repositoriesService, ThreadPool threadPool) { super(settings); this.clusterService = clusterService; this.indexNameExpressionResolver = indexNameExpressionResolver; @@ -253,7 +258,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); if (snapshots == null || snapshots.entries().isEmpty()) { // Store newSnapshot here to be processed in clusterStateProcessed - List indices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(currentState, request.indicesOptions(), request.indices())); + List indices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(currentState, + request.indicesOptions(), request.indices())); logger.trace("[{}][{}] creating snapshot for indices [{}]", repositoryName, snapshotName, indices); List snapshotIndices = repositoryData.resolveNewIndices(indices); newSnapshot = new SnapshotsInProgress.Entry(new Snapshot(repositoryName, snapshotId), @@ -393,9 +399,11 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus if (entry.state() != State.ABORTED) { // Replace the snapshot that was just intialized - ImmutableOpenMap shards = shards(currentState, entry.indices()); + ImmutableOpenMap shards = + shards(currentState, entry.indices()); if (!partial) { - Tuple, Set> indicesWithMissingShards = indicesWithMissingShards(shards, currentState.metaData()); + Tuple, Set> indicesWithMissingShards = indicesWithMissingShards(shards, + currentState.metaData()); Set missing = indicesWithMissingShards.v1(); Set closed = indicesWithMissingShards.v2(); if (missing.isEmpty() == false || closed.isEmpty() == false) { @@ -437,8 +445,10 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus @Override public void onFailure(String source, Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()), e); - removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, true, userCreateSnapshotListener, e)); + logger.warn(() -> new ParameterizedMessage("[{}] failed to create snapshot", + snapshot.snapshot().getSnapshotId()), e); + removeSnapshotFromClusterState(snapshot.snapshot(), null, e, + new CleanupAfterErrorListener(snapshot, true, userCreateSnapshotListener, e)); } @Override @@ -471,8 +481,10 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus @Override public void onFailure(Exception e) { - logger.warn(() -> new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e); - removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, snapshotCreated, userCreateSnapshotListener, e)); + logger.warn(() -> new ParameterizedMessage("failed to create 
snapshot [{}]", + snapshot.snapshot().getSnapshotId()), e); + removeSnapshotFromClusterState(snapshot.snapshot(), null, e, + new CleanupAfterErrorListener(snapshot, snapshotCreated, userCreateSnapshotListener, e)); } }); } @@ -484,7 +496,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus private final CreateSnapshotListener userCreateSnapshotListener; private final Exception e; - CleanupAfterErrorListener(SnapshotsInProgress.Entry snapshot, boolean snapshotCreated, CreateSnapshotListener userCreateSnapshotListener, Exception e) { + CleanupAfterErrorListener(SnapshotsInProgress.Entry snapshot, boolean snapshotCreated, + CreateSnapshotListener userCreateSnapshotListener, Exception e) { this.snapshot = snapshot; this.snapshotCreated = snapshotCreated; this.userCreateSnapshotListener = userCreateSnapshotListener; @@ -520,7 +533,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus snapshot.includeGlobalState()); } catch (Exception inner) { inner.addSuppressed(exception); - logger.warn(() -> new ParameterizedMessage("[{}] failed to close snapshot in repository", snapshot.snapshot()), inner); + logger.warn(() -> new ParameterizedMessage("[{}] failed to close snapshot in repository", + snapshot.snapshot()), inner); } } userCreateSnapshotListener.onFailure(e); @@ -744,8 +758,10 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus } else { // TODO: Restart snapshot on another node? snapshotChanged = true; - logger.warn("failing snapshot of shard [{}] on closed node [{}]", shardEntry.key, shardStatus.nodeId()); - shards.put(shardEntry.key, new ShardSnapshotStatus(shardStatus.nodeId(), State.FAILED, "node shutdown")); + logger.warn("failing snapshot of shard [{}] on closed node [{}]", + shardEntry.key, shardStatus.nodeId()); + shards.put(shardEntry.key, + new ShardSnapshotStatus(shardStatus.nodeId(), State.FAILED, "node shutdown")); } } } @@ -808,7 +824,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus for (final SnapshotsInProgress.Entry snapshot : snapshots.entries()) { SnapshotsInProgress.Entry updatedSnapshot = snapshot; if (snapshot.state() == State.STARTED) { - ImmutableOpenMap shards = processWaitingShards(snapshot.shards(), routingTable); + ImmutableOpenMap shards = processWaitingShards(snapshot.shards(), + routingTable); if (shards != null) { changed = true; if (!snapshot.state().completed() && completed(shards.values())) { @@ -831,7 +848,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus @Override public void onFailure(String source, Exception e) { - logger.warn(() -> new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e); + logger.warn(() -> + new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e); } }); } @@ -929,12 +947,14 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus * @param shards list of shard statuses * @return list of failed and closed indices */ - private Tuple, Set> indicesWithMissingShards(ImmutableOpenMap shards, MetaData metaData) { + private Tuple, Set> indicesWithMissingShards( + ImmutableOpenMap shards, MetaData metaData) { Set missing = new HashSet<>(); Set closed = new HashSet<>(); for (ObjectObjectCursor entry : shards) { if (entry.value.state() == State.MISSING) { - if (metaData.hasIndex(entry.key.getIndex().getName()) && 
metaData.getIndexSafe(entry.key.getIndex()).getState() == IndexMetaData.State.CLOSE) { + if (metaData.hasIndex(entry.key.getIndex().getName()) && + metaData.getIndexSafe(entry.key.getIndex()).getState() == IndexMetaData.State.CLOSE) { closed.add(entry.key.getIndex().getName()); } else { missing.add(entry.key.getIndex().getName()); @@ -1130,7 +1150,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus public ClusterState execute(ClusterState currentState) throws Exception { SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE); if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) { - throw new ConcurrentSnapshotExecutionException(snapshot, "cannot delete - another snapshot is currently being deleted"); + throw new ConcurrentSnapshotExecutionException(snapshot, + "cannot delete - another snapshot is currently being deleted"); } RestoreInProgress restoreInProgress = currentState.custom(RestoreInProgress.TYPE); if (restoreInProgress != null) { @@ -1236,7 +1257,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus listener, true); } catch (Exception ex) { - logger.warn(() -> new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex); + logger.warn(() -> + new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex); } } ); @@ -1384,7 +1406,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus IndexMetaData indexMetaData = metaData.index(indexName); if (indexMetaData == null) { // The index was deleted before we managed to start the snapshot - mark it as missing. - builder.put(new ShardId(indexName, IndexMetaData.INDEX_UUID_NA_VALUE, 0), new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "missing index")); + builder.put(new ShardId(indexName, IndexMetaData.INDEX_UUID_NA_VALUE, 0), + new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "missing index")); } else if (indexMetaData.getState() == IndexMetaData.State.CLOSE) { for (int i = 0; i < indexMetaData.getNumberOfShards(); i++) { ShardId shardId = new ShardId(indexMetaData.getIndex(), i); @@ -1397,17 +1420,22 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus if (indexRoutingTable != null) { ShardRouting primary = indexRoutingTable.shard(i).primaryShard(); if (primary == null || !primary.assignedToNode()) { - builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "primary shard is not allocated")); + builder.put(shardId, + new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "primary shard is not allocated")); } else if (primary.relocating() || primary.initializing()) { - // The WAITING state was introduced in V1.2.0 - don't use it if there are nodes with older version in the cluster + // The WAITING state was introduced in V1.2.0 - + // don't use it if there are nodes with older version in the cluster builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId(), State.WAITING)); } else if (!primary.started()) { - builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId(), State.MISSING, "primary shard hasn't been started yet")); + builder.put(shardId, + new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId(), State.MISSING, + "primary shard hasn't been started yet")); } else { builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId())); } } else 
{ - builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "missing routing table")); + builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, + "missing routing table")); } } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index a3be8cfa15b..74efe5f68cc 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -367,7 +367,8 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest logger.info("--> start 2 nodes"); Client client = client(); - assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_shards", 2).put("number_of_replicas", 0))); + assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_shards", 2) + .put("number_of_replicas", 0))); ensureGreen(); logger.info("--> indexing some data"); @@ -392,7 +393,10 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest String blockedNode = blockNodeWithIndex("test-repo", "test-idx"); logger.info("--> snapshot"); - client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get(); + client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + .setWaitForCompletion(false) + .setIndices("test-idx") + .get(); logger.info("--> waiting for block to kick in"); waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60)); @@ -415,7 +419,8 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest nodes.add(internalCluster().startNode()); Client client = client(); - assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_shards", 2).put("number_of_replicas", 0))); + assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_shards", 2) + .put("number_of_replicas", 0))); ensureGreen(); logger.info("--> indexing some data"); @@ -443,7 +448,10 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest int numberOfFilesBeforeSnapshot = numberOfFiles(repo); logger.info("--> snapshot"); - client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get(); + client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + .setWaitForCompletion(false) + .setIndices("test-idx") + .get(); logger.info("--> waiting for block to kick in"); waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60)); @@ -509,7 +517,8 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest ensureGreen("test-idx-all"); logger.info("--> create an index that will be closed"); - assertAcked(prepareCreate("test-idx-closed", 1, Settings.builder().put("number_of_shards", 4).put("number_of_replicas", 0))); + assertAcked(prepareCreate("test-idx-closed", 1, Settings.builder().put("number_of_shards", 4) + .put("number_of_replicas", 0))); ensureGreen("test-idx-closed"); logger.info("--> indexing some data into test-idx-all"); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 35e813756bc..fcb06acd60a 100644 --- 
a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -1345,7 +1345,8 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-1").get(); logger.info("--> make sure snapshot doesn't exist"); - assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1"), SnapshotMissingException.class); + assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo") + .addSnapshots("test-snap-1"), SnapshotMissingException.class); for (String index : indices) { assertTrue(Files.notExists(indicesPath.resolve(indexIds.get(index).getId()))); @@ -1384,7 +1385,8 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-1").get(); logger.info("--> make sure snapshot doesn't exist"); - assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1"), SnapshotMissingException.class); + assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo") + .addSnapshots("test-snap-1"), SnapshotMissingException.class); } public void testDeleteSnapshotWithCorruptedSnapshotFile() throws Exception { @@ -2014,7 +2016,8 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas logger.info("--> waiting for block to kick in"); waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60)); - logger.info("--> execution was blocked on node [{}], checking snapshot status with specified repository and snapshot", blockedNode); + logger.info("--> execution was blocked on node [{}], checking snapshot status with specified repository and snapshot", + blockedNode); SnapshotsStatusResponse response = client.admin().cluster().prepareSnapshotStatus("test-repo").execute().actionGet(); assertThat(response.getSnapshots().size(), equalTo(1)); SnapshotStatus snapshotStatus = response.getSnapshots().get(0); diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java index 9dca64d5b83..2b634385b29 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java @@ -346,7 +346,8 @@ public class MockRepository extends FsRepository { } @Override - public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) + throws IOException { maybeIOExceptionOrBlock(blobName); super.writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists); if (RandomizedContext.current().getRandom().nextBoolean()) { From 344917efabe9b9bd5df0cd70a7be85d417d4964a Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Wed, 19 Dec 2018 11:12:18 +0000 Subject: [PATCH 05/20] Add script filter to intervals (#36776) This commit adds the ability to filter out intervals based on their start and end position, and internal gaps: ``` POST _search { "query": { "intervals" : { "my_text" : { "match" : { "query" : "hot porridge", "filter" : { "script" : { "source" : "interval.start > 10 && interval.end < 20 && interval.gaps == 0" } } } } } } } ``` --- 
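Note for reviewers: the script contract added here is deliberately tiny. As a minimal sketch of what a compiled filter amounts to (plain Java rather than Painless; the wrapper class name `ScriptFilterSketch` is illustrative only and not part of this patch), the Painless source above is equivalent to:

```java
import org.elasticsearch.index.query.IntervalFilterScript;

public class ScriptFilterSketch {
    // Illustrative only: accept intervals that start after position 10,
    // end before position 20, and contain no internal gaps - the same
    // predicate as the Painless source
    // "interval.start > 10 && interval.end < 20 && interval.gaps == 0".
    static final IntervalFilterScript EXAMPLE = new IntervalFilterScript() {
        @Override
        public boolean execute(Interval interval) {
            return interval.getStart() > 10
                && interval.getEnd() < 20
                && interval.getGaps() == 0;
        }
    };
}
```

At search time `ScriptFilterSource#accept` wraps the current `IntervalIterator` in an `Interval` and calls `execute` once per candidate interval, so only intervals for which the script returns `true` are produced.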
.../query-dsl/intervals-query.asciidoc | 29 +++++++++ .../painless/spi/org.elasticsearch.txt | 6 ++ .../painless/90_interval_query_filter.yml | 46 ++++++++++++++ .../index/query/IntervalFilterScript.java | 60 +++++++++++++++++++ .../index/query/IntervalsSourceProvider.java | 58 +++++++++++++++++- .../elasticsearch/script/ScriptModule.java | 4 +- .../query/IntervalQueryBuilderTests.java | 49 +++++++++++++++ .../script/MockScriptEngine.java | 13 ++++ 8 files changed, 262 insertions(+), 3 deletions(-) create mode 100644 modules/lang-painless/src/test/resources/rest-api-spec/test/painless/90_interval_query_filter.yml create mode 100644 server/src/main/java/org/elasticsearch/index/query/IntervalFilterScript.java diff --git a/docs/reference/query-dsl/intervals-query.asciidoc b/docs/reference/query-dsl/intervals-query.asciidoc index 790fdf08bfd..27609e85659 100644 --- a/docs/reference/query-dsl/intervals-query.asciidoc +++ b/docs/reference/query-dsl/intervals-query.asciidoc @@ -154,6 +154,35 @@ Produces intervals that are not contained by an interval from the filter rule `not_overlapping`:: Produces intervals that do not overlap with an interval from the filter rule +[[interval-script-filter]] +==== Script filters + +You can also filter intervals based on their start position, end position and +internal gap count, using a script. The script has access to an `interval` +variable, with `start`, `end` and `gaps` methods: + +[source,js] +-------------------------------------------------- +POST _search +{ + "query": { + "intervals" : { + "my_text" : { + "match" : { + "query" : "hot porridge", + "filter" : { + "script" : { + "source" : "interval.start > 10 && interval.end < 20 && interval.gaps == 0" + } + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + [[interval-minimization]] ==== Minimization diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt index d5ced84ebcb..2b4946d1ca8 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt @@ -234,6 +234,12 @@ class org.elasticsearch.index.similarity.ScriptedSimilarity$Doc { float getFreq() } +class org.elasticsearch.index.query.IntervalFilterScript$Interval { + int getStart() + int getEnd() + int getGaps() +} + # for testing class org.elasticsearch.painless.FeatureTest no_import { int z diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/90_interval_query_filter.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/90_interval_query_filter.yml new file mode 100644 index 00000000000..8aa10e92a24 --- /dev/null +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/90_interval_query_filter.yml @@ -0,0 +1,46 @@ +setup: + - skip: + version: " - 6.99.99" + reason: "Implemented in 7.0" + + - do: + indices.create: + index: test + body: + mappings: + test: + properties: + text: + type: text + analyzer: standard + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test", "_type": "test", "_id": "1"}}' + - '{"text" : "Some like it hot, some like it cold"}' + - '{"index": {"_index": "test", "_type": "test", "_id": "2"}}' + - '{"text" : "Its cold outside, theres no kind of atmosphere"}' + - '{"index": {"_index": "test", "_type": "test", "_id": "3"}}' + - 
'{"text" : "Baby its cold there outside"}' + - '{"index": {"_index": "test", "_type": "test", "_id": "4"}}' + - '{"text" : "Outside it is cold and wet"}' + +--- +"Test filtering by script": + - do: + search: + index: test + body: + query: + intervals: + text: + match: + query: "cold" + filter: + script: + source: "interval.start > 3" + + - match: { hits.total.value: 1 } + + diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalFilterScript.java b/server/src/main/java/org/elasticsearch/index/query/IntervalFilterScript.java new file mode 100644 index 00000000000..306560b3d5f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/query/IntervalFilterScript.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.search.intervals.IntervalIterator; +import org.elasticsearch.script.ScriptContext; + +/** + * Base class for scripts used as interval filters, see {@link IntervalsSourceProvider.IntervalFilter} + */ +public abstract class IntervalFilterScript { + + public static class Interval { + + private IntervalIterator iterator; + + void setIterator(IntervalIterator iterator) { + this.iterator = iterator; + } + + public int getStart() { + return iterator.start(); + } + + public int getEnd() { + return iterator.end(); + } + + public int getGaps() { + return iterator.gaps(); + } + } + + public abstract boolean execute(Interval interval); + + public interface Factory { + IntervalFilterScript newInstance(); + } + + public static final String[] PARAMETERS = new String[]{ "interval" }; + public static final ScriptContext CONTEXT = new ScriptContext<>("interval", Factory.class); + +} diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java b/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java index 79bcbe26fbc..3fa608db37e 100644 --- a/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java +++ b/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java @@ -19,6 +19,8 @@ package org.elasticsearch.index.query; +import org.apache.lucene.search.intervals.FilteredIntervalsSource; +import org.apache.lucene.search.intervals.IntervalIterator; import org.apache.lucene.search.intervals.Intervals; import org.apache.lucene.search.intervals.IntervalsSource; import org.elasticsearch.common.ParseField; @@ -34,6 +36,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.script.Script; import java.io.IOException; import java.util.ArrayList; @@ -387,24 +390,59 @@ 
public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont } } + static class ScriptFilterSource extends FilteredIntervalsSource { + + final IntervalFilterScript script; + IntervalFilterScript.Interval interval = new IntervalFilterScript.Interval(); + + ScriptFilterSource(IntervalsSource in, String name, IntervalFilterScript script) { + super("FILTER(" + name + ")", in); + this.script = script; + } + + @Override + protected boolean accept(IntervalIterator it) { + interval.setIterator(it); + return script.execute(interval); + } + } + public static class IntervalFilter implements ToXContent, Writeable { public static final String NAME = "filter"; private final String type; private final IntervalsSourceProvider filter; + private final Script script; public IntervalFilter(IntervalsSourceProvider filter, String type) { this.filter = filter; this.type = type.toLowerCase(Locale.ROOT); + this.script = null; + } + + IntervalFilter(Script script) { + this.script = script; + this.type = "script"; + this.filter = null; } public IntervalFilter(StreamInput in) throws IOException { this.type = in.readString(); - this.filter = in.readNamedWriteable(IntervalsSourceProvider.class); + this.filter = in.readOptionalNamedWriteable(IntervalsSourceProvider.class); + if (in.readBoolean()) { + this.script = new Script(in); + } + else { + this.script = null; + } } public IntervalsSource filter(IntervalsSource input, QueryShardContext context, MappedFieldType fieldType) throws IOException { + if (script != null) { + IntervalFilterScript ifs = context.getScriptService().compile(script, IntervalFilterScript.CONTEXT).newInstance(); + return new ScriptFilterSource(input, script.getIdOrCode(), ifs); + } IntervalsSource filterSource = filter.getSource(context, fieldType); switch (type) { case "containing": @@ -439,7 +477,14 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(type); - out.writeNamedWriteable(filter); + out.writeOptionalNamedWriteable(filter); + if (script == null) { + out.writeBoolean(false); + } + else { + out.writeBoolean(true); + script.writeTo(out); + } } @Override @@ -458,6 +503,13 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont throw new ParsingException(parser.getTokenLocation(), "Expected [FIELD_NAME] but got [" + parser.currentToken() + "]"); } String type = parser.currentName(); + if (Script.SCRIPT_PARSE_FIELD.match(type, parser.getDeprecationHandler())) { + Script script = Script.parse(parser); + if (parser.nextToken() != XContentParser.Token.END_OBJECT) { + throw new ParsingException(parser.getTokenLocation(), "Expected [END_OBJECT] but got [" + parser.currentToken() + "]"); + } + return new IntervalFilter(script); + } if (parser.nextToken() != XContentParser.Token.START_OBJECT) { throw new ParsingException(parser.getTokenLocation(), "Expected [START_OBJECT] but got [" + parser.currentToken() + "]"); } @@ -475,4 +527,6 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont } } + + } diff --git a/server/src/main/java/org/elasticsearch/script/ScriptModule.java b/server/src/main/java/org/elasticsearch/script/ScriptModule.java index 8e9d162c52e..da61fd98d52 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptModule.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptModule.java @@ -21,6 +21,7 @@ package org.elasticsearch.script; import 
org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.IntervalFilterScript; import org.elasticsearch.plugins.ScriptPlugin; import org.elasticsearch.search.aggregations.pipeline.MovingFunctionScript; @@ -60,7 +61,8 @@ public class ScriptModule { ScriptedMetricAggContexts.InitScript.CONTEXT, ScriptedMetricAggContexts.MapScript.CONTEXT, ScriptedMetricAggContexts.CombineScript.CONTEXT, - ScriptedMetricAggContexts.ReduceScript.CONTEXT + ScriptedMetricAggContexts.ReduceScript.CONTEXT, + IntervalFilterScript.CONTEXT ).collect(Collectors.toMap(c -> c.name, Function.identity())); } diff --git a/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java index 06ab542ebc0..11f704e3b12 100644 --- a/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java @@ -25,11 +25,16 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.intervals.IntervalQuery; import org.apache.lucene.search.intervals.Intervals; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import static org.hamcrest.Matchers.equalTo; @@ -277,4 +282,48 @@ public class IntervalQueryBuilderTests extends AbstractQueryTestCase new IntervalFilterScript() { + @Override + public boolean execute(Interval interval) { + return interval.getStart() > 3; + } + }; + + ScriptService scriptService = new ScriptService(Settings.EMPTY, Collections.emptyMap(), Collections.emptyMap()){ + @Override + @SuppressWarnings("unchecked") + public FactoryType compile(Script script, ScriptContext context) { + assertEquals(IntervalFilterScript.CONTEXT, context); + assertEquals(new Script("interval.start > 3"), script); + return (FactoryType) factory; + } + }; + + QueryShardContext baseContext = createShardContext(); + QueryShardContext context = new QueryShardContext(baseContext.getShardId(), baseContext.getIndexSettings(), + null, null, baseContext.getMapperService(), null, + scriptService, + null, null, null, null, null, null); + + String json = "{ \"intervals\" : { \"" + STRING_FIELD_NAME + "\": { " + + "\"match\" : { " + + " \"query\" : \"term1\"," + + " \"filter\" : { " + + " \"script\" : { " + + " \"source\" : \"interval.start > 3\" } } } } } }"; + + IntervalQueryBuilder builder = (IntervalQueryBuilder) parseQuery(json); + Query q = builder.toQuery(context); + + + IntervalQuery expected = new IntervalQuery(STRING_FIELD_NAME, + new IntervalsSourceProvider.ScriptFilterSource(Intervals.term("term1"), "interval.start > 3", null)); + assertEquals(expected, q); + + } + + } diff --git a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java index 0379012d2b8..8044655b44e 100644 --- a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java @@ -21,6 +21,7 @@ 
package org.elasticsearch.script; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Scorable; +import org.elasticsearch.index.query.IntervalFilterScript; import org.elasticsearch.index.similarity.ScriptedSimilarity.Doc; import org.elasticsearch.index.similarity.ScriptedSimilarity.Field; import org.elasticsearch.index.similarity.ScriptedSimilarity.Query; @@ -287,6 +288,9 @@ public class MockScriptEngine implements ScriptEngine { } else if (context.instanceClazz.equals(ScriptedMetricAggContexts.ReduceScript.class)) { ScriptedMetricAggContexts.ReduceScript.Factory factory = mockCompiled::createMetricAggReduceScript; return context.factoryClazz.cast(factory); + } else if (context.instanceClazz.equals(IntervalFilterScript.class)) { + IntervalFilterScript.Factory factory = mockCompiled::createIntervalFilterScript; + return context.factoryClazz.cast(factory); } ContextCompiler compiler = contexts.get(context); if (compiler != null) { @@ -353,6 +357,15 @@ public class MockScriptEngine implements ScriptEngine { public ScriptedMetricAggContexts.ReduceScript createMetricAggReduceScript(Map params, List states) { return new MockMetricAggReduceScript(params, states, script != null ? script : ctx -> 42d); } + + public IntervalFilterScript createIntervalFilterScript() { + return new IntervalFilterScript() { + @Override + public boolean execute(Interval interval) { + return false; + } + }; + } } public static class MockFilterScript implements FilterScript.LeafFactory { From a6af33ef0b1c59cf0152634957d900b8dd0adb9c Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 19 Dec 2018 13:03:35 +0100 Subject: [PATCH 06/20] [TEST] Wait for license metadata to be installed --- .../java/org/elasticsearch/xpack/CcrIntegTestCase.java | 9 +++++++++ .../org/elasticsearch/xpack/CcrSingleNodeTestCase.java | 6 ++++++ 2 files changed, 15 insertions(+) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index 8865c536917..9b6933db386 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -51,6 +51,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.LicensesMetaData; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptService; @@ -124,6 +125,10 @@ public abstract class CcrIntegTestCase extends ESTestCase { Function.identity()); leaderCluster.beforeTest(random(), 0.0D); leaderCluster.ensureAtLeastNumDataNodes(numberOfNodesPerCluster()); + assertBusy(() -> { + ClusterService clusterService = leaderCluster.getMasterNodeInstance(ClusterService.class); + assertNotNull(clusterService.state().metaData().custom(LicensesMetaData.TYPE)); + }); String address = leaderCluster.getDataNodeInstance(TransportService.class).boundAddress().publishAddress().toString(); InternalTestCluster followerCluster = new InternalTestCluster(randomLong(), createTempDir(), true, true, numberOfNodesPerCluster(), @@ -133,6 +138,10 @@ public abstract class CcrIntegTestCase extends ESTestCase { followerCluster.beforeTest(random(), 0.0D); followerCluster.ensureAtLeastNumDataNodes(numberOfNodesPerCluster()); + 
assertBusy(() -> { + ClusterService clusterService = followerCluster.getMasterNodeInstance(ClusterService.class); + assertNotNull(clusterService.state().metaData().custom(LicensesMetaData.TYPE)); + }); } /** diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java index 5311f54762b..2b1c8a8ef2e 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java @@ -11,6 +11,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.LicensesMetaData; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.transport.TransportService; @@ -57,6 +58,11 @@ public abstract class CcrSingleNodeTestCase extends ESSingleNodeTestCase { assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); } + @Before + public void waitForTrialLicenseToBeGenerated() throws Exception { + assertBusy(() -> assertNotNull(getInstanceFromNode(ClusterService.class).state().metaData().custom(LicensesMetaData.TYPE))); + } + @After public void purgeCCRMetadata() throws Exception { ClusterService clusterService = getInstanceFromNode(ClusterService.class); From 978713a67c5cece341c3f94406f8864574c42c22 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 19 Dec 2018 13:14:36 +0100 Subject: [PATCH 07/20] SNAPSHOTS+TESTS: Correctly Wait for Clean State (#36801) * The test must also wait until there is no in-progress deletion, since the master failover leads to a snapshot deletion * Closes #36779 --- .../elasticsearch/discovery/SnapshotDisruptionIT.java | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java index cfcc7796c5e..48fb6947479 100644 --- a/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java @@ -25,7 +25,9 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRes import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.SnapshotDeletionsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; import org.elasticsearch.cluster.service.ClusterService; @@ -72,7 +74,6 @@ public class SnapshotDisruptionIT extends ESIntegTestCase { .build(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/36779") public void testDisruptionOnSnapshotInitialization() throws Exception { final String idxName = "test"; final List allMasterEligibleNodes = internalCluster().startMasterOnlyNodes(3); @@ -133,11 +134,15 @@ public class SnapshotDisruptionIT extends ESIntegTestCase { logger.info("--> wait until the snapshot is done"); assertBusy(() -> { -
SnapshotsInProgress snapshots = dataNodeClient().admin().cluster().prepareState().setLocal(false).get().getState() - .custom(SnapshotsInProgress.TYPE); + ClusterState state = dataNodeClient().admin().cluster().prepareState().get().getState(); + SnapshotsInProgress snapshots = state.custom(SnapshotsInProgress.TYPE); + SnapshotDeletionsInProgress snapshotDeletionsInProgress = state.custom(SnapshotDeletionsInProgress.TYPE); if (snapshots != null && snapshots.entries().size() > 0) { logger.info("Current snapshot state [{}]", snapshots.entries().get(0).state()); fail("Snapshot is still running"); + } else if (snapshotDeletionsInProgress != null && snapshotDeletionsInProgress.hasDeletionsInProgress()) { + logger.info("Current snapshot deletion state [{}]", snapshotDeletionsInProgress); + fail("Snapshot deletion is still running"); } else { logger.info("Snapshot is no longer in the cluster state"); } From 216b15410768694019240636c08180245328ac3a Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Wed, 19 Dec 2018 13:15:05 +0100 Subject: [PATCH 08/20] Replace 0L with an UNASSIGNED_PRIMARY_TERM constant (#36819) * Replace 0L with an UNASSIGNED_PRIMARY_TERM constant 0 is an illegal value for a primary term; it is often used to indicate that the primary term isn't assigned or is irrelevant. This PR replaces these usages of 0 with a constant, to improve readability and so that they can be tracked and, if needed, replaced. * feedback --- .../action/DocWriteResponse.java | 10 +++--- .../action/bulk/BulkRequest.java | 3 +- .../action/delete/DeleteRequest.java | 21 ++++++------ .../action/index/IndexRequest.java | 27 ++++++++-------- .../TransportReplicationAction.java | 4 ++- .../action/shard/ShardStateAction.java | 6 ++-- .../cluster/metadata/IndexMetaData.java | 2 ++ .../uid/PerThreadIDVersionAndSeqNoLookup.java | 14 ++++---- .../elasticsearch/index/engine/Engine.java | 32 ++++++++++--------- .../elasticsearch/index/get/GetResult.java | 17 +++++----- .../index/seqno/SequenceNumbers.java | 5 +++ .../index/translog/BaseTranslogReader.java | 3 +- .../index/translog/TranslogHeader.java | 6 ++-- .../translog/TruncateTranslogAction.java | 2 +- .../index/engine/InternalEngineTests.java | 7 ++-- .../index/get/GetResultTests.java | 3 +- .../index/translog/TranslogHeaderTests.java | 2 +- 17 files changed, 94 insertions(+), 70 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 5008dbe36c3..11ffad1c93d 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -43,6 +43,8 @@ import java.net.URLEncoder; import java.util.Locale; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; /** * A base class for the response of a write operation that involves a single doc @@ -266,8 +268,8 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr seqNo = in.readZLong(); primaryTerm = in.readVLong(); } else { - seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - primaryTerm = 0; + seqNo = UNASSIGNED_SEQ_NO; + primaryTerm = UNASSIGNED_PRIMARY_TERM; } forcedRefresh = in.readBoolean(); result = Result.readFrom(in); @@ -378,8 +380,8 @@
protected Result result = null; protected boolean forcedRefresh; protected ShardInfo shardInfo = null; - protected Long seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - protected Long primaryTerm = 0L; + protected Long seqNo = UNASSIGNED_SEQ_NO; + protected Long primaryTerm = UNASSIGNED_PRIMARY_TERM; public ShardId getShardId() { return shardId; diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index fd11b586666..01b9c22b4d1 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -56,6 +56,7 @@ import java.util.Objects; import java.util.Set; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; /** * A bulk request holds an ordered {@link IndexRequest}s, {@link DeleteRequest}s and {@link UpdateRequest}s @@ -351,7 +352,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques long version = Versions.MATCH_ANY; VersionType versionType = VersionType.INTERNAL; long ifSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - long ifPrimaryTerm = 0; + long ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM; int retryOnConflict = 0; String pipeline = valueOrDefault(defaultPipeline, globalPipeline); diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java index 7d80b769aa5..7fb19bbf9eb 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java @@ -31,12 +31,13 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; /** * A request to delete a document from an index based on its type and id. 
Best created using @@ -58,8 +59,8 @@ public class DeleteRequest extends ReplicatedWriteRequest private String routing; private long version = Versions.MATCH_ANY; private VersionType versionType = VersionType.INTERNAL; - private long ifSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - private long ifPrimaryTerm = 0; + private long ifSeqNo = UNASSIGNED_SEQ_NO; + private long ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM; public DeleteRequest() { } @@ -116,16 +117,16 @@ public class DeleteRequest extends ReplicatedWriteRequest validationException = addValidationError("version type [force] may no longer be used", validationException); } - if (ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO && ( + if (ifSeqNo != UNASSIGNED_SEQ_NO && ( versionType != VersionType.INTERNAL || version != Versions.MATCH_ANY )) { validationException = addValidationError("compare and write operations can not use versioning", validationException); } - if (ifPrimaryTerm == 0 && ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { + if (ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM && ifSeqNo != UNASSIGNED_SEQ_NO) { validationException = addValidationError("ifSeqNo is set, but primary term is [0]", validationException); } - if (ifPrimaryTerm != 0 && ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO) { + if (ifPrimaryTerm != UNASSIGNED_PRIMARY_TERM && ifSeqNo == UNASSIGNED_SEQ_NO) { validationException = addValidationError("ifSeqNo is unassigned, but primary term is [" + ifPrimaryTerm + "]", validationException); } @@ -239,7 +240,7 @@ public class DeleteRequest extends ReplicatedWriteRequest * {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown. */ public DeleteRequest setIfSeqNo(long seqNo) { - if (seqNo < 0 && seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { + if (seqNo < 0 && seqNo != UNASSIGNED_SEQ_NO) { throw new IllegalArgumentException("sequence numbers must be non negative. got [" + seqNo + "]."); } ifSeqNo = seqNo; @@ -286,8 +287,8 @@ public class DeleteRequest extends ReplicatedWriteRequest ifSeqNo = in.readZLong(); ifPrimaryTerm = in.readVLong(); } else { - ifSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - ifPrimaryTerm = 0; + ifSeqNo = UNASSIGNED_SEQ_NO; + ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM; } } @@ -305,7 +306,7 @@ public class DeleteRequest extends ReplicatedWriteRequest if (out.getVersion().onOrAfter(Version.V_6_6_0)) { out.writeZLong(ifSeqNo); out.writeVLong(ifPrimaryTerm); - } else if (ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO || ifPrimaryTerm != 0) { + } else if (ifSeqNo != UNASSIGNED_SEQ_NO || ifPrimaryTerm != UNASSIGNED_PRIMARY_TERM) { assert false : "setIfMatch [" + ifSeqNo + "], currentDocTem [" + ifPrimaryTerm + "]"; throw new IllegalStateException( "sequence number based compare and write is not supported until all nodes are on version 7.0 or higher. 
" + diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 46028a7c036..195f6863b3d 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -43,7 +43,6 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; -import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.ShardId; @@ -54,6 +53,8 @@ import java.util.Map; import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; /** * Index request to index a typed JSON document into a specific index and make it searchable. Best @@ -106,8 +107,8 @@ public class IndexRequest extends ReplicatedWriteRequest implement private long autoGeneratedTimestamp = UNSET_AUTO_GENERATED_TIMESTAMP; private boolean isRetry = false; - private long ifSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - private long ifPrimaryTerm = 0; + private long ifSeqNo = UNASSIGNED_SEQ_NO; + private long ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM; public IndexRequest() { @@ -174,7 +175,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement return validationException; } - if (ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO || ifPrimaryTerm != 0) { + if (ifSeqNo != UNASSIGNED_SEQ_NO || ifPrimaryTerm != UNASSIGNED_PRIMARY_TERM) { validationException = addValidationError("create operations do not support compare and set. use index instead", validationException); return validationException; @@ -207,15 +208,15 @@ public class IndexRequest extends ReplicatedWriteRequest implement validationException = addValidationError("pipeline cannot be an empty string", validationException); } - if (ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO && ( + if (ifSeqNo != UNASSIGNED_SEQ_NO && ( versionType != VersionType.INTERNAL || version != Versions.MATCH_ANY )) { validationException = addValidationError("compare and write operations can not use versioning", validationException); } - if (ifPrimaryTerm == 0 && ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { + if (ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM && ifSeqNo != UNASSIGNED_SEQ_NO) { validationException = addValidationError("ifSeqNo is set, but primary term is [0]", validationException); } - if (ifPrimaryTerm != 0 && ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO) { + if (ifPrimaryTerm != UNASSIGNED_PRIMARY_TERM && ifSeqNo == UNASSIGNED_SEQ_NO) { validationException = addValidationError("ifSeqNo is unassigned, but primary term is [" + ifPrimaryTerm + "]", validationException); } @@ -511,7 +512,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement * {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown. */ public IndexRequest setIfSeqNo(long seqNo) { - if (seqNo < 0 && seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { + if (seqNo < 0 && seqNo != UNASSIGNED_SEQ_NO) { throw new IllegalArgumentException("sequence numbers must be non negative. 
got [" + seqNo + "]."); } ifSeqNo = seqNo; @@ -573,8 +574,8 @@ public class IndexRequest extends ReplicatedWriteRequest implement // generate id if not already provided if (id == null) { assert autoGeneratedTimestamp == -1 : "timestamp has already been generated!"; - assert ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO; - assert ifPrimaryTerm == 0; + assert ifSeqNo == UNASSIGNED_SEQ_NO; + assert ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM; autoGeneratedTimestamp = Math.max(0, System.currentTimeMillis()); // extra paranoia String uid; if (indexCreatedVersion.onOrAfter(Version.V_6_0_0_beta1)) { @@ -620,8 +621,8 @@ public class IndexRequest extends ReplicatedWriteRequest implement ifSeqNo = in.readZLong(); ifPrimaryTerm = in.readVLong(); } else { - ifSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - ifPrimaryTerm = 0; + ifSeqNo = UNASSIGNED_SEQ_NO; + ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM; } } @@ -657,7 +658,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement if (out.getVersion().onOrAfter(Version.V_6_6_0)) { out.writeZLong(ifSeqNo); out.writeVLong(ifPrimaryTerm); - } else if (ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO || ifPrimaryTerm != 0) { + } else if (ifSeqNo != UNASSIGNED_SEQ_NO || ifPrimaryTerm != UNASSIGNED_PRIMARY_TERM) { assert false : "setIfMatch [" + ifSeqNo + "], currentDocTem [" + ifPrimaryTerm + "]"; throw new IllegalStateException( "sequence number based compare and write is not supported until all nodes are on version 7.0 or higher. " + diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index e9c8e7e47ea..4894ca1f772 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -87,6 +87,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import java.util.function.Supplier; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; + /** * Base class for requests that should be executed on a primary copy followed by replica copies. * Subclasses can resolve the target shard and provide implementation for primary and replica operations. 
@@ -1248,7 +1250,7 @@ public abstract class TransportReplicationAction< request = requestSupplier.get(); // null now, but will be populated by reading from the streams targetAllocationID = null; - primaryTerm = 0L; + primaryTerm = UNASSIGNED_PRIMARY_TERM; } public ConcreteShardRequest(R request, String targetAllocationID, long primaryTerm) { diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 4f13c485fde..e5c46cbb0ee 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.MasterNodeChangePredicate; import org.elasticsearch.cluster.NotMasterException; +import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingService; @@ -48,7 +49,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.tasks.Task; @@ -74,6 +74,8 @@ import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.function.Predicate; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; + public class ShardStateAction { private static final Logger logger = LogManager.getLogger(ShardStateAction.class); @@ -603,7 +605,7 @@ public class ShardStateAction { allocationId = in.readString(); if (in.getVersion().before(Version.V_6_3_0)) { final long primaryTerm = in.readVLong(); - assert primaryTerm == 0L : "shard is only started by itself: primary term [" + primaryTerm + "]"; + assert primaryTerm == UNASSIGNED_PRIMARY_TERM : "shard is only started by itself: primary term [" + primaryTerm + "]"; } this.message = in.readString(); if (in.getVersion().before(Version.V_6_3_0)) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 5d23971dddb..06d5cd6f85d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -55,6 +55,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.gateway.MetaDataStateFormat; import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; @@ -1079,6 +1080,7 @@ public class IndexMetaData implements Diffable, ToXContentFragmen throw new IllegalStateException("you must set the number of shards before setting/reading primary terms"); } primaryTerms = new long[numberOfShards()]; + Arrays.fill(primaryTerms, SequenceNumbers.UNASSIGNED_PRIMARY_TERM); } diff --git 
a/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java b/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java index bdb794cbf0a..889721a49e0 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java @@ -33,10 +33,12 @@ import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqN import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; -import org.elasticsearch.index.seqno.SequenceNumbers; import java.io.IOException; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; + /** Utility class to do efficient primary-key (only 1 doc contains the * given term) lookups by segment, re-using the enums. This class is @@ -116,18 +118,18 @@ final class PerThreadIDVersionAndSeqNoLookup { if (seqNos != null && seqNos.advanceExact(docID)) { seqNo = seqNos.longValue(); } else { - seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; + seqNo = UNASSIGNED_SEQ_NO; } NumericDocValues terms = context.reader().getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME); if (terms != null && terms.advanceExact(docID)) { term = terms.longValue(); } else { - term = 0; + term = UNASSIGNED_PRIMARY_TERM; } } else { - seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - term = 0; + seqNo = UNASSIGNED_SEQ_NO; + term = UNASSIGNED_PRIMARY_TERM; } return new DocIdAndVersion(docID, versions.longValue(), seqNo, term, context.reader(), context.docBase); } else { @@ -175,7 +177,7 @@ final class PerThreadIDVersionAndSeqNoLookup { if (seqNoDV != null && seqNoDV.advanceExact(docID)) { seqNo = seqNoDV.longValue(); } else { - seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; + seqNo = UNASSIGNED_SEQ_NO; } final boolean isLive = (liveDocs == null || liveDocs.get(docID)); if (isLive) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index f0d01574766..008b8533103 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -72,7 +72,6 @@ import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.seqno.SeqNoStats; -import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; @@ -106,6 +105,9 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.BiFunction; import java.util.stream.Stream; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; + public abstract class Engine implements Closeable { public static final String SYNC_COMMIT_ID = "sync_id"; @@ -147,7 +149,7 @@ public abstract class Engine implements Closeable { * 1. A primary initializes this marker once using the max_seq_no from its history, then advances when processing an update or delete. * 2. 
A replica never advances this marker by itself but only inherits from its primary (via advanceMaxSeqNoOfUpdatesOrDeletes). */ - private final AtomicLong maxSeqNoOfUpdatesOrDeletes = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + private final AtomicLong maxSeqNoOfUpdatesOrDeletes = new AtomicLong(UNASSIGNED_SEQ_NO); protected Engine(EngineConfig engineConfig) { Objects.requireNonNull(engineConfig.getStore(), "Store must be provided to the engine"); @@ -425,8 +427,8 @@ public abstract class Engine implements Closeable { protected Result(Operation.TYPE operationType, Mapping requiredMappingUpdate) { this.operationType = operationType; this.version = Versions.NOT_FOUND; - this.seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - this.term = 0L; + this.seqNo = UNASSIGNED_SEQ_NO; + this.term = UNASSIGNED_PRIMARY_TERM; this.failure = null; this.requiredMappingUpdate = requiredMappingUpdate; this.resultType = Type.MAPPING_UPDATE_REQUIRED; @@ -522,7 +524,7 @@ public abstract class Engine implements Closeable { * use in case of the index operation failed before getting to internal engine **/ public IndexResult(Exception failure, long version, long term) { - this(failure, version, term, SequenceNumbers.UNASSIGNED_SEQ_NO); + this(failure, version, term, UNASSIGNED_SEQ_NO); } public IndexResult(Exception failure, long version, long term, long seqNo) { @@ -554,7 +556,7 @@ public abstract class Engine implements Closeable { * use in case of the delete operation failed before getting to internal engine **/ public DeleteResult(Exception failure, long version, long term) { - this(failure, version, term, SequenceNumbers.UNASSIGNED_SEQ_NO, false); + this(failure, version, term, UNASSIGNED_SEQ_NO, false); } public DeleteResult(Exception failure, long version, long term, long seqNo, boolean found) { @@ -1353,9 +1355,9 @@ public abstract class Engine implements Closeable { super(uid, seqNo, primaryTerm, version, versionType, origin, startTime); assert (origin == Origin.PRIMARY) == (versionType != null) : "invalid version_type=" + versionType + " for origin=" + origin; assert ifPrimaryTerm >= 0 : "ifPrimaryTerm [" + ifPrimaryTerm + "] must be non negative"; - assert ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO || ifSeqNo >=0 : + assert ifSeqNo == UNASSIGNED_SEQ_NO || ifSeqNo >=0 : "ifSeqNo [" + ifSeqNo + "] must be non negative or unset"; - assert (origin == Origin.PRIMARY) || (ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO && ifPrimaryTerm == 0) : + assert (origin == Origin.PRIMARY) || (ifSeqNo == UNASSIGNED_SEQ_NO && ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM) : "cas operations are only allowed if origin is primary. 
get [" + origin + "]"; this.doc = doc; this.isRetry = isRetry; @@ -1369,8 +1371,8 @@ public abstract class Engine implements Closeable { } // TEST ONLY Index(Term uid, long primaryTerm, ParsedDocument doc, long version) { - this(uid, doc, SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, version, VersionType.INTERNAL, - Origin.PRIMARY, System.nanoTime(), -1, false, SequenceNumbers.UNASSIGNED_SEQ_NO, 0); + this(uid, doc, UNASSIGNED_SEQ_NO, primaryTerm, version, VersionType.INTERNAL, + Origin.PRIMARY, System.nanoTime(), -1, false, UNASSIGNED_SEQ_NO, 0); } // TEST ONLY public ParsedDocument parsedDoc() { @@ -1447,9 +1449,9 @@ public abstract class Engine implements Closeable { super(uid, seqNo, primaryTerm, version, versionType, origin, startTime); assert (origin == Origin.PRIMARY) == (versionType != null) : "invalid version_type=" + versionType + " for origin=" + origin; assert ifPrimaryTerm >= 0 : "ifPrimaryTerm [" + ifPrimaryTerm + "] must be non negative"; - assert ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO || ifSeqNo >=0 : + assert ifSeqNo == UNASSIGNED_SEQ_NO || ifSeqNo >=0 : "ifSeqNo [" + ifSeqNo + "] must be non negative or unset"; - assert (origin == Origin.PRIMARY) || (ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO && ifPrimaryTerm == 0) : + assert (origin == Origin.PRIMARY) || (ifSeqNo == UNASSIGNED_SEQ_NO && ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM) : "cas operations are only allowed if origin is primary. get [" + origin + "]"; this.type = Objects.requireNonNull(type); this.id = Objects.requireNonNull(id); @@ -1458,13 +1460,13 @@ public abstract class Engine implements Closeable { } public Delete(String type, String id, Term uid, long primaryTerm) { - this(type, id, uid, SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, Versions.MATCH_ANY, VersionType.INTERNAL, - Origin.PRIMARY, System.nanoTime(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0); + this(type, id, uid, UNASSIGNED_SEQ_NO, primaryTerm, Versions.MATCH_ANY, VersionType.INTERNAL, + Origin.PRIMARY, System.nanoTime(), UNASSIGNED_SEQ_NO, 0); } public Delete(Delete template, VersionType versionType) { this(template.type(), template.id(), template.uid(), template.seqNo(), template.primaryTerm(), template.version(), - versionType, template.origin(), template.startTime(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0); + versionType, template.origin(), template.startTime(), UNASSIGNED_SEQ_NO, 0); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/get/GetResult.java b/server/src/main/java/org/elasticsearch/index/get/GetResult.java index 5ec448888b3..869bc548f89 100644 --- a/server/src/main/java/org/elasticsearch/index/get/GetResult.java +++ b/server/src/main/java/org/elasticsearch/index/get/GetResult.java @@ -34,7 +34,6 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.IgnoredFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; -import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.search.lookup.SourceLookup; import java.io.IOException; @@ -48,6 +47,8 @@ import java.util.Objects; import static java.util.Collections.emptyMap; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; public class GetResult implements Streamable, Iterable, ToXContentObject { @@ -82,9 +83,9 @@ public class GetResult 
implements Streamable, Iterable, ToXConten this.id = id; this.seqNo = seqNo; this.primaryTerm = primaryTerm; - assert (seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO && primaryTerm == 0) || (seqNo >= 0 && primaryTerm >= 1) : + assert (seqNo == UNASSIGNED_SEQ_NO && primaryTerm == UNASSIGNED_PRIMARY_TERM) || (seqNo >= 0 && primaryTerm >= 1) : "seqNo: " + seqNo + " primaryTerm: " + primaryTerm; - assert exists || (seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO && primaryTerm == 0) : + assert exists || (seqNo == UNASSIGNED_SEQ_NO && primaryTerm == UNASSIGNED_PRIMARY_TERM) : "doc not found but seqNo/primaryTerm are set"; this.version = version; this.exists = exists; @@ -239,7 +240,7 @@ public class GetResult implements Streamable, Iterable, ToXConten } public XContentBuilder toXContentEmbedded(XContentBuilder builder, Params params) throws IOException { - if (seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { // seqNo may not be assigned if read from an old node + if (seqNo != UNASSIGNED_SEQ_NO) { // seqNo may not be assigned if read from an old node builder.field(_SEQ_NO, seqNo); builder.field(_PRIMARY_TERM, primaryTerm); } @@ -313,8 +314,8 @@ public class GetResult implements Streamable, Iterable, ToXConten String currentFieldName = parser.currentName(); long version = -1; - long seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - long primaryTerm = 0; + long seqNo = UNASSIGNED_SEQ_NO; + long primaryTerm = UNASSIGNED_PRIMARY_TERM; Boolean found = null; BytesReference source = null; Map fields = new HashMap<>(); @@ -388,8 +389,8 @@ public class GetResult implements Streamable, Iterable, ToXConten seqNo = in.readZLong(); primaryTerm = in.readVLong(); } else { - seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - primaryTerm = 0L; + seqNo = UNASSIGNED_SEQ_NO; + primaryTerm = UNASSIGNED_PRIMARY_TERM; } version = in.readLong(); exists = in.readBoolean(); diff --git a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java index 7cffc8c1ac9..6336e83338f 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java @@ -41,6 +41,11 @@ public class SequenceNumbers { */ public static final long NO_OPS_PERFORMED = -1L; + /** + * Represents an unassigned primary term (e.g., when a primary shard was not yet allocated) + */ + public static final long UNASSIGNED_PRIMARY_TERM = 0L; + /** * Reads the sequence number stats from the commit data (maximum sequence number and local checkpoint). 
* diff --git a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java index c61718332cd..bfd9e31abcd 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java +++ b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.translog; import org.elasticsearch.common.io.stream.ByteBufferStreamInput; +import org.elasticsearch.index.seqno.SequenceNumbers; import java.io.IOException; import java.nio.ByteBuffer; @@ -113,7 +114,7 @@ public abstract class BaseTranslogReader implements Comparable getPrimaryTerm() && getPrimaryTerm() != TranslogHeader.UNKNOWN_PRIMARY_TERM) { + if (op.primaryTerm() > getPrimaryTerm() && getPrimaryTerm() != SequenceNumbers.UNASSIGNED_PRIMARY_TERM) { throw new TranslogCorruptedException( path.toString(), "operation's term is newer than translog header term; " + diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java index d80a6729d30..d47f0d9d7fd 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java @@ -34,6 +34,8 @@ import java.io.IOException; import java.nio.channels.FileChannel; import java.nio.file.Path; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; + /** * Each translog file is started with a translog header then followed by translog operations. */ @@ -45,8 +47,6 @@ final class TranslogHeader { public static final int VERSION_PRIMARY_TERM = 3; // added primary term public static final int CURRENT_VERSION = VERSION_PRIMARY_TERM; - public static final long UNKNOWN_PRIMARY_TERM = 0L; - private final String translogUUID; private final long primaryTerm; private final int headerSizeInBytes; @@ -146,7 +146,7 @@ final class TranslogHeader { primaryTerm = in.readLong(); } else { assert version == VERSION_CHECKPOINTS : "Unknown header version [" + version + "]"; - primaryTerm = UNKNOWN_PRIMARY_TERM; + primaryTerm = UNASSIGNED_PRIMARY_TERM; } // Verify the checksum if (version >= VERSION_PRIMARY_TERM) { diff --git a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java index 87600f4441b..133055f2917 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java @@ -207,7 +207,7 @@ public class TruncateTranslogAction { */ private static int writeEmptyTranslog(Path filename, String translogUUID) throws IOException { try (FileChannel fc = FileChannel.open(filename, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW)) { - TranslogHeader header = new TranslogHeader(translogUUID, TranslogHeader.UNKNOWN_PRIMARY_TERM); + TranslogHeader header = new TranslogHeader(translogUUID, SequenceNumbers.UNASSIGNED_PRIMARY_TERM); header.write(fc); return header.sizeInBytes(); } diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 05c02773067..696d50a93a9 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -172,6 +172,7 @@ import static org.elasticsearch.index.engine.Engine.Operation.Origin.LOCAL_TRANS import static org.elasticsearch.index.engine.Engine.Operation.Origin.PEER_RECOVERY; import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY; import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.CoreMatchers.instanceOf; @@ -1801,7 +1802,7 @@ public class InternalEngineTests extends EngineTestCase { int opsPerformed = 0; long lastOpVersion = currentOpVersion; long lastOpSeqNo = UNASSIGNED_SEQ_NO; - long lastOpTerm = 0; + long lastOpTerm = UNASSIGNED_PRIMARY_TERM; final AtomicLong currentTerm = new AtomicLong(1); BiFunction indexWithVersion = (version, index) -> new Engine.Index(index.uid(), index.parsedDoc(), UNASSIGNED_SEQ_NO, currentTerm.get(), version, index.versionType(), index.origin(), index.startTime(), @@ -1900,7 +1901,7 @@ public class InternalEngineTests extends EngineTestCase { docDeleted = true; lastOpVersion = result.getVersion(); lastOpSeqNo = UNASSIGNED_SEQ_NO; - lastOpTerm = 0; + lastOpTerm = UNASSIGNED_PRIMARY_TERM; opsPerformed++; } } @@ -4309,7 +4310,7 @@ public class InternalEngineTests extends EngineTestCase { final long seqNo; DocIdAndSeqNo docIdAndSeqNo = VersionsAndSeqNoResolver.loadDocIdAndSeqNo(searcher.reader(), get.uid()); if (docIdAndSeqNo == null) { - primaryTerm = 0; + primaryTerm = UNASSIGNED_PRIMARY_TERM; seqNo = UNASSIGNED_SEQ_NO; } else { seqNo = docIdAndSeqNo.seqNo; diff --git a/server/src/test/java/org/elasticsearch/index/get/GetResultTests.java b/server/src/test/java/org/elasticsearch/index/get/GetResultTests.java index 0dc6b2573ea..ad8673d13ea 100644 --- a/server/src/test/java/org/elasticsearch/index/get/GetResultTests.java +++ b/server/src/test/java/org/elasticsearch/index/get/GetResultTests.java @@ -44,6 +44,7 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.index.get.DocumentFieldTests.randomDocumentField; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; @@ -209,7 +210,7 @@ public class GetResultTests extends ESTestCase { } } else { seqNo = UNASSIGNED_SEQ_NO; - primaryTerm = 0; + primaryTerm = UNASSIGNED_PRIMARY_TERM; version = -1; exists = false; } diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java index 99e21d47604..7d06e25519a 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java @@ -83,7 +83,7 @@ public class TranslogHeaderTests extends ESTestCase { try (FileChannel channel = FileChannel.open(translogFile, 
StandardOpenOption.READ)) { final TranslogHeader inHeader = TranslogHeader.read(translogUUID, translogFile, channel); assertThat(inHeader.getTranslogUUID(), equalTo(translogUUID)); - assertThat(inHeader.getPrimaryTerm(), equalTo(TranslogHeader.UNKNOWN_PRIMARY_TERM)); + assertThat(inHeader.getPrimaryTerm(), equalTo(SequenceNumbers.UNASSIGNED_PRIMARY_TERM)); assertThat(inHeader.sizeInBytes(), equalTo((int)channel.position())); } expectThrows(TranslogCorruptedException.class, () -> { From 487a1c4f71262123136dd0f5e6a5cbf0f2696399 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 19 Dec 2018 13:26:04 +0100 Subject: [PATCH 09/20] Fix cluster state persistence for single-node discovery (#36825) Single-node discovery is not persisting cluster states, which was caused by a recent 7.0-only refactoring. This commit ensures that the cluster state is properly persisted when using single-node discovery and adds a corresponding test. --- .../org/elasticsearch/discovery/DiscoveryModule.java | 3 ++- .../discovery/single/SingleNodeDiscovery.java | 9 ++++++++- .../discovery/single/SingleNodeDiscoveryIT.java | 6 ++++++ .../discovery/single/SingleNodeDiscoveryTests.java | 2 +- .../java/org/elasticsearch/test/InternalTestCluster.java | 3 ++- 5 files changed, 19 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index f4a44a75789..1572548b1b1 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -132,7 +132,8 @@ public class DiscoveryModule { transportService, namedWriteableRegistry, allocationService, masterService, () -> gatewayMetaState.getPersistedState(settings, (ClusterApplierService) clusterApplier), hostsProvider, clusterApplier, Randomness.get())); - discoveryTypes.put("single-node", () -> new SingleNodeDiscovery(settings, transportService, masterService, clusterApplier)); + discoveryTypes.put("single-node", () -> new SingleNodeDiscovery(settings, transportService, masterService, clusterApplier, + gatewayMetaState)); for (DiscoveryPlugin plugin : plugins) { plugin.getDiscoveryTypes(threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier, clusterSettings, hostsProvider, allocationService, gatewayMetaState).forEach((key, value) -> { diff --git a/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java b/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java index c759fbf4d15..238f72f72f4 100644 --- a/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java +++ b/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java @@ -31,12 +31,14 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.cluster.service.ClusterApplier.ClusterApplyListener; +import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoveryStats; +import org.elasticsearch.gateway.GatewayMetaState; import 
org.elasticsearch.transport.TransportService; import java.util.Objects; @@ -55,12 +57,17 @@ public class SingleNodeDiscovery extends AbstractLifecycleComponent implements D private volatile ClusterState clusterState; public SingleNodeDiscovery(final Settings settings, final TransportService transportService, - final MasterService masterService, final ClusterApplier clusterApplier) { + final MasterService masterService, final ClusterApplier clusterApplier, + final GatewayMetaState gatewayMetaState) { super(Objects.requireNonNull(settings)); this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); this.transportService = Objects.requireNonNull(transportService); masterService.setClusterStateSupplier(() -> clusterState); this.clusterApplier = clusterApplier; + + if (clusterApplier instanceof ClusterApplierService) { + ((ClusterApplierService) clusterApplier).addLowPriorityApplier(gatewayMetaState); + } } @Override diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java index c2a61263d3a..31005ea83cd 100644 --- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java @@ -167,4 +167,10 @@ public class SingleNodeDiscoveryIT extends ESIntegTestCase { } } + public void testStatePersistence() throws Exception { + createIndex("test"); + internalCluster().fullRestart(); + assertTrue(client().admin().indices().prepareExists("test").get().isExists()); + } + } diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryTests.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryTests.java index d34d3e9d6a7..c3dfad2d437 100644 --- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryTests.java @@ -69,7 +69,7 @@ public class SingleNodeDiscoveryTests extends ESTestCase { clusterState.set(clusterStateSupplier.get()); listener.onSuccess(source); } - }); + }, null); discovery.start(); discovery.startInitialJoin(); final DiscoveryNodes nodes = clusterState.get().nodes(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 246e0f555b1..db719389665 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -961,7 +961,8 @@ public final class InternalTestCluster extends TestCluster { .put(newSettings) .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), newIdSeed) .build(); - if (DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(finalSettings) == false) { + final boolean usingSingleNodeDiscovery = DiscoveryModule.DISCOVERY_TYPE_SETTING.get(finalSettings).equals("single-node"); + if (usingSingleNodeDiscovery == false && DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(finalSettings) == false) { throw new IllegalStateException(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " is not configured after restart of [" + name + "]"); } From 3f8f907606e3b7c5f5d370c36da58876bcbd0051 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 19 Dec 2018 12:51:14 +0000 Subject: [PATCH 10/20] Extend time allowed to detect disconnection (#36827) If the master sends both its 
follower checks just before disconnection then neither will receive a response, meaning it must wait for the checks to time out and send another in order to detect the disconnection and stand down. Closes #36788 --- .../cluster/coordination/CoordinatorTests.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 5407cbc54c6..7a1e63245a6 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -105,13 +105,13 @@ import static org.elasticsearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERV import static org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.elasticsearch.transport.TransportService.HANDSHAKE_ACTION_NAME; import static org.elasticsearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; -import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; @@ -946,9 +946,10 @@ public class CoordinatorTests extends ESTestCase { cluster.runFor(DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "committing setting update"); leader.disconnect(); - cluster.runFor(defaultMillis(FOLLOWER_CHECK_INTERVAL_SETTING) + DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "detecting disconnection"); + cluster.runFor(defaultMillis(FOLLOWER_CHECK_TIMEOUT_SETTING) + defaultMillis(FOLLOWER_CHECK_INTERVAL_SETTING) + + DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "detecting disconnection"); - assertThat(leader.clusterApplier.lastAppliedClusterState.blocks().global(), contains(expectedBlock)); + assertThat(leader.clusterApplier.lastAppliedClusterState.blocks().global(), hasItem(expectedBlock)); // TODO reboot the leader and verify that the same block is applied when it restarts } From 3cc0cf03c6da31304e5dca27a8a672e5159c777e Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 19 Dec 2018 13:51:12 +0100 Subject: [PATCH 11/20] [TEST] No need to specifically check licensesMetaData on master node. 
--- .../test/java/org/elasticsearch/xpack/CcrIntegTestCase.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index 9b6933db386..1d705934cce 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -126,7 +126,7 @@ public abstract class CcrIntegTestCase extends ESTestCase { leaderCluster.beforeTest(random(), 0.0D); leaderCluster.ensureAtLeastNumDataNodes(numberOfNodesPerCluster()); assertBusy(() -> { - ClusterService clusterService = leaderCluster.getMasterNodeInstance(ClusterService.class); + ClusterService clusterService = leaderCluster.getInstance(ClusterService.class); assertNotNull(clusterService.state().metaData().custom(LicensesMetaData.TYPE)); }); @@ -139,7 +139,7 @@ public abstract class CcrIntegTestCase extends ESTestCase { followerCluster.beforeTest(random(), 0.0D); followerCluster.ensureAtLeastNumDataNodes(numberOfNodesPerCluster()); assertBusy(() -> { - ClusterService clusterService = followerCluster.getMasterNodeInstance(ClusterService.class); + ClusterService clusterService = followerCluster.getInstance(ClusterService.class); assertNotNull(clusterService.state().metaData().custom(LicensesMetaData.TYPE)); }); } From 63aa8756b26541248a968d84c229a585f18fc1b1 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Wed, 19 Dec 2018 14:56:40 +0200 Subject: [PATCH 12/20] Add X-Forwarded-For to the logfile audit (#36427) Extracts the value of the X-Forwarded-For HTTP request header and places it in the audit entries from the logfile output. --- .../en/security/auditing/event-types.asciidoc | 10 ++++++ .../core/src/main/config/log4j2.properties | 2 ++ .../xpack/security/Security.java | 3 ++ .../xpack/security/audit/AuditTrail.java | 2 ++ .../audit/logfile/LoggingAuditTrail.java | 29 +++++++++++++++ .../audit/logfile/LoggingAuditTrailTests.java | 36 +++++++++++++++++++ 6 files changed, 82 insertions(+) diff --git a/x-pack/docs/en/security/auditing/event-types.asciidoc b/x-pack/docs/en/security/auditing/event-types.asciidoc index 27db063bb40..e2d71912ba9 100644 --- a/x-pack/docs/en/security/auditing/event-types.asciidoc +++ b/x-pack/docs/en/security/auditing/event-types.asciidoc @@ -94,6 +94,16 @@ descriptions for the format that you are using. the request associated with this event. This header can be used freely by the client to mark API calls, as it has no semantics in Elasticsearch. +`x_forwarded_for` :: The verbatim value of the `X-Forwarded-For` HTTP request + header (if present) of the request associated with the + audit event. This header is commonly added by proxies + when they forward requests and the value is the address + of the proxied client. When a request crosses multiple + proxies the header is a comma delimited list with the + last value being the address of the second to last + proxy server (the address of the last proxy server is + designated by the `origin.address` field). 
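To make the comma-delimited semantics above concrete (an editorial sketch, not part of the patch): the first value is the originating client and each proxy appends the address it received the request from. A consumer of the audit entry could recover the client address as follows, assuming the header can be trusted; the helper name is hypothetical and the sample value is taken from the tests later in this patch.

    // Hypothetical helper for a consumer of the audit entry; the audit trail
    // itself records the header value verbatim, as the documentation above states.
    static String originatingClient(String xForwardedFor) {
        // e.g. "203.0.113.195, 70.41.3.18, 150.172.238.178" -> "203.0.113.195"
        String[] hops = xForwardedFor.split(",");
        return hops[0].trim();
    }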
+ ==== Audit event attributes of the REST event type diff --git a/x-pack/plugin/core/src/main/config/log4j2.properties b/x-pack/plugin/core/src/main/config/log4j2.properties index 528e015bdf8..21b0732fed4 100644 --- a/x-pack/plugin/core/src/main/config/log4j2.properties +++ b/x-pack/plugin/core/src/main/config/log4j2.properties @@ -28,6 +28,7 @@ appender.audit_rolling.layout.pattern = {\ %varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\ %varsNotEmpty{, "indices":%map{indices}}\ %varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\ + %varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\ %varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\ %varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\ %varsNotEmpty{, "event.category":"%enc{%map{event.category}}{JSON}"}\ @@ -56,6 +57,7 @@ appender.audit_rolling.layout.pattern = {\ # "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal) # "indices" the array of indices that the "action" is acting upon # "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header +# "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array) # "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event # "rule" name of the applied rule if the "origin.type" is "ip_filter" # "event.category" fixed value "elasticsearch-audit" diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 6b41ab02071..3a735332a6b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -634,6 +634,9 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw } Set headers = new HashSet<>(); headers.add(UsernamePasswordToken.BASIC_AUTH_HEADER); + if (XPackSettings.AUDIT_ENABLED.get(settings)) { + headers.add(AuditTrail.X_FORWARDED_FOR_HEADER); + } if (AuthenticationServiceField.RUN_AS_ENABLED.get(settings)) { headers.add(AuthenticationServiceField.RUN_AS_USER_HEADER); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java index d85d1271886..4f5413c30d1 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java @@ -16,6 +16,8 @@ import java.net.InetAddress; public interface AuditTrail { + String X_FORWARDED_FOR_HEADER = "X-Forwarded-For"; + String name(); void authenticationSuccess(String requestId, String realm, User user, RestRequest request); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java index c74a11a94a4..cdeee882c1b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java +++
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java @@ -108,6 +108,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { public static final String TRANSPORT_PROFILE_FIELD_NAME = "transport.profile"; public static final String RULE_FIELD_NAME = "rule"; public static final String OPAQUE_ID_FIELD_NAME = "opaque_id"; + public static final String X_FORWARDED_FOR_FIELD_NAME = "x_forwarded_for"; public static final String NAME = "logfile"; public static final Setting EMIT_HOST_ADDRESS_SETTING = Setting.boolSetting(setting("audit.logfile.emit_node_host_address"), @@ -216,6 +217,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { .withRestOrigin(request) .withRequestBody(request) .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) .build(); logger.info(logEntry); } @@ -238,6 +240,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { .withRestOrTransportOrigin(message, threadContext) .with(INDICES_FIELD_NAME, indices.orElse(null)) .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) .build(); logger.info(logEntry); } @@ -259,6 +262,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { .withRestOrTransportOrigin(message, threadContext) .with(INDICES_FIELD_NAME, indices.orElse(null)) .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) .build(); logger.info(logEntry); } @@ -277,6 +281,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { .withRequestBody(request) .withRequestId(requestId) .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) .build(); logger.info(logEntry); } @@ -298,6 +303,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { .withRestOrTransportOrigin(message, threadContext) .with(INDICES_FIELD_NAME, indices.orElse(null)) .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) .build(); logger.info(logEntry); } @@ -315,6 +321,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { .withRequestBody(request) .withRequestId(requestId) .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) .build(); logger.info(logEntry); } @@ -335,6 +342,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { .withRestOrTransportOrigin(message, threadContext) .with(INDICES_FIELD_NAME, indices.orElse(null)) .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) .build(); logger.info(logEntry); } @@ -354,6 +362,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { .withRequestBody(request) .withRequestId(requestId) .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) .build(); logger.info(logEntry); } @@ -376,6 +385,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { .withRestOrTransportOrigin(message, threadContext) .with(INDICES_FIELD_NAME, indices.orElse(null)) .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) .build(); logger.info(logEntry); } @@ -396,6 +406,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { .withRequestBody(request) .withRequestId(requestId) .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) .build(); logger.info(logEntry); } @@ -420,6 +431,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { .with(INDICES_FIELD_NAME, indices.orElse(null)) .with(PRINCIPAL_ROLES_FIELD_NAME, 
roleNames) .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) .build(); logger.info(logEntry); } @@ -443,6 +455,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { .with(INDICES_FIELD_NAME, indices.orElse(null)) .with(PRINCIPAL_ROLES_FIELD_NAME, roleNames) .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) .build(); logger.info(logEntry); } @@ -460,6 +473,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { .withRequestBody(request) .withRequestId(requestId) .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) .build(); logger.info(logEntry); } @@ -480,6 +494,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { .withRestOrTransportOrigin(message, threadContext) .with(INDICES_FIELD_NAME, indices.orElse(null)) .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) .build(); logger.info(logEntry); } @@ -502,6 +517,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { .withPrincipal(user) .with(INDICES_FIELD_NAME, indices.orElse(null)) .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) .build(); logger.info(logEntry); } @@ -520,6 +536,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { .with(TRANSPORT_PROFILE_FIELD_NAME, profile) .with(RULE_FIELD_NAME, rule.toString()) .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) .build(); logger.info(logEntry); } @@ -537,6 +554,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { .with(TRANSPORT_PROFILE_FIELD_NAME, profile) .with(RULE_FIELD_NAME, rule.toString()) .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) .build(); logger.info(logEntry); } @@ -559,6 +577,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { .with(INDICES_FIELD_NAME, indices.orElse(null)) .with(PRINCIPAL_ROLES_FIELD_NAME, roleNames) .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) .build(); logger.info(logEntry); } @@ -582,6 +601,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { .with(INDICES_FIELD_NAME, indices.orElse(null)) .with(PRINCIPAL_ROLES_FIELD_NAME, roleNames) .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) .build(); logger.info(logEntry); } @@ -603,6 +623,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { .withRequestBody(request) .withRequestId(requestId) .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) .build(); logger.info(logEntry); } @@ -696,6 +717,14 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { return this; } + LogEntryBuilder withXForwardedFor(ThreadContext threadContext) { + final String xForwardedFor = threadContext.getHeader(AuditTrail.X_FORWARDED_FOR_HEADER); + if (xForwardedFor != null) { + logEntry.with(X_FORWARDED_FOR_FIELD_NAME, xForwardedFor); + } + return this; + } + LogEntryBuilder withPrincipal(User user) { logEntry.with(PRINCIPAL_FIELD_NAME, user.principal()); if (user.isRunAs()) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java index 5492f35d77a..da4823a3f3a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.user.SystemUser; import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.audit.AuditTrail; import org.elasticsearch.xpack.security.audit.AuditUtil; import org.elasticsearch.xpack.security.rest.RemoteHostHeader; import org.elasticsearch.xpack.security.transport.filter.IPFilter; @@ -189,6 +190,10 @@ public class LoggingAuditTrailTests extends ESTestCase { if (randomBoolean()) { threadContext.putHeader(Task.X_OPAQUE_ID, randomAlphaOfLengthBetween(1, 4)); } + if (randomBoolean()) { + threadContext.putHeader(AuditTrail.X_FORWARDED_FOR_HEADER, + randomFrom("2001:db8:85a3:8d3:1319:8a2e:370:7348", "203.0.113.195", "203.0.113.195, 70.41.3.18, 150.172.238.178")); + } logger = CapturingLogger.newCapturingLogger(Level.INFO, patternLayout); auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); } @@ -212,6 +217,7 @@ public class LoggingAuditTrailTests extends ESTestCase { indicesRequest(message, checkedFields, checkedArrayFields); restOrTransportOrigin(message, threadContext, checkedFields); opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap()); // test disabled @@ -245,6 +251,7 @@ public class LoggingAuditTrailTests extends ESTestCase { .put(LoggingAuditTrail.URL_PATH_FIELD_NAME, "_uri") .put(LoggingAuditTrail.URL_QUERY_FIELD_NAME, null); opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.immutableMap()); // test disabled @@ -275,6 +282,7 @@ public class LoggingAuditTrailTests extends ESTestCase { restOrTransportOrigin(message, threadContext, checkedFields); indicesRequest(message, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap()); // test disabled @@ -303,6 +311,7 @@ public class LoggingAuditTrailTests extends ESTestCase { restOrTransportOrigin(message, threadContext, checkedFields); indicesRequest(message, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap()); // test disabled @@ -343,6 +352,7 @@ public class LoggingAuditTrailTests extends ESTestCase { .put(LoggingAuditTrail.URL_PATH_FIELD_NAME, "_uri") .put(LoggingAuditTrail.URL_QUERY_FIELD_NAME, params.isEmpty() ? null : "foo=bar"); opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.immutableMap()); // test disabled @@ -382,6 +392,7 @@ public class LoggingAuditTrailTests extends ESTestCase { .put(LoggingAuditTrail.URL_PATH_FIELD_NAME, "_uri") .put(LoggingAuditTrail.URL_QUERY_FIELD_NAME, params.isEmpty() ? 
null : "bar=baz"); opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.immutableMap()); // test disabled @@ -422,6 +433,7 @@ public class LoggingAuditTrailTests extends ESTestCase { restOrTransportOrigin(message, threadContext, checkedFields); indicesRequest(message, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap()); } @@ -462,6 +474,7 @@ public class LoggingAuditTrailTests extends ESTestCase { .put(LoggingAuditTrail.URL_PATH_FIELD_NAME, "_uri") .put(LoggingAuditTrail.URL_QUERY_FIELD_NAME, params.isEmpty() ? null : "_param=baz"); opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.immutableMap()); } @@ -484,6 +497,7 @@ public class LoggingAuditTrailTests extends ESTestCase { restOrTransportOrigin(message, threadContext, checkedFields); indicesRequest(message, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap()); // test disabled @@ -525,6 +539,7 @@ public class LoggingAuditTrailTests extends ESTestCase { restOrTransportOrigin(message, threadContext, checkedFields); indicesRequest(message, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap()); } @@ -547,6 +562,7 @@ public class LoggingAuditTrailTests extends ESTestCase { restOrTransportOrigin(message, threadContext, checkedFields); indicesRequest(message, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap()); // test disabled @@ -579,6 +595,8 @@ public class LoggingAuditTrailTests extends ESTestCase { restOrTransportOrigin(message, threadContext, checkedFields); indicesRequest(message, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); + assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap()); // test disabled @@ -615,6 +633,7 @@ public class LoggingAuditTrailTests extends ESTestCase { .put(LoggingAuditTrail.URL_PATH_FIELD_NAME, "_uri") .put(LoggingAuditTrail.URL_QUERY_FIELD_NAME, params.isEmpty() ? 
null : "_param=baz"); opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.immutableMap()); // test disabled @@ -643,6 +662,7 @@ public class LoggingAuditTrailTests extends ESTestCase { restOrTransportOrigin(message, threadContext, checkedFields); indicesRequest(message, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap()); // test disabled @@ -684,6 +704,7 @@ public class LoggingAuditTrailTests extends ESTestCase { restOrTransportOrigin(message, threadContext, checkedFields); indicesRequest(message, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap()); // test disabled @@ -713,6 +734,7 @@ public class LoggingAuditTrailTests extends ESTestCase { .put(LoggingAuditTrail.TRANSPORT_PROFILE_FIELD_NAME, profile) .put(LoggingAuditTrail.RULE_FIELD_NAME, "deny _all"); opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.immutableMap()); // test disabled @@ -751,6 +773,7 @@ public class LoggingAuditTrailTests extends ESTestCase { .put(LoggingAuditTrail.TRANSPORT_PROFILE_FIELD_NAME, profile) .put(LoggingAuditTrail.RULE_FIELD_NAME, "allow default:accept_all"); opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.immutableMap()); } @@ -779,6 +802,7 @@ public class LoggingAuditTrailTests extends ESTestCase { restOrTransportOrigin(message, threadContext, checkedFields); indicesRequest(message, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap()); // test disabled @@ -817,6 +841,7 @@ public class LoggingAuditTrailTests extends ESTestCase { restOrTransportOrigin(message, threadContext, checkedFields); indicesRequest(message, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap()); // test disabled @@ -878,6 +903,7 @@ public class LoggingAuditTrailTests extends ESTestCase { checkedFields.put(LoggingAuditTrail.PRINCIPAL_FIELD_NAME, "_username"); } opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.immutableMap()); } @@ -919,6 +945,7 @@ public class LoggingAuditTrailTests extends ESTestCase { restOrTransportOrigin(message, threadContext, checkedFields); indicesRequest(message, checkedFields, checkedArrayFields); opaqueId(threadContext, checkedFields); + forwardedFor(threadContext, checkedFields); assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap()); } @@ -1177,6 +1204,15 @@ public class LoggingAuditTrailTests extends ESTestCase { } } + private static void forwardedFor(ThreadContext threadContext ,MapBuilder checkedFields) { + final String forwardedFor = threadContext.getHeader(AuditTrail.X_FORWARDED_FOR_HEADER); + if (forwardedFor != null) { + checkedFields.put(LoggingAuditTrail.X_FORWARDED_FOR_FIELD_NAME, forwardedFor); + } else { + checkedFields.put(LoggingAuditTrail.X_FORWARDED_FOR_FIELD_NAME, null); + } + } + private static 
void indicesRequest(TransportMessage message, MapBuilder checkedFields, MapBuilder checkedArrayFields) { if (message instanceof IndicesRequest) { From 18691daebe904ead72cb30e92f257026da92a6a8 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 19 Dec 2018 13:56:54 +0100 Subject: [PATCH 13/20] [TEST] Renamed ccr qa module. --- .../build.gradle | 0 .../src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename x-pack/plugin/ccr/qa/{downgraded-to-basic-license => downgrade-to-basic-license}/build.gradle (100%) rename x-pack/plugin/ccr/qa/{downgraded-to-basic-license => downgrade-to-basic-license}/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java (100%) diff --git a/x-pack/plugin/ccr/qa/downgraded-to-basic-license/build.gradle b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle similarity index 100% rename from x-pack/plugin/ccr/qa/downgraded-to-basic-license/build.gradle rename to x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle diff --git a/x-pack/plugin/ccr/qa/downgraded-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java similarity index 100% rename from x-pack/plugin/ccr/qa/downgraded-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java rename to x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java From 8f141b8a41554f07217e1eaa6c0028d21c833ae4 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 19 Dec 2018 13:59:58 +0100 Subject: [PATCH 14/20] Fix ClusterInfoServiceIT timeouts (#36758) The test testClusterInfoServiceInformationClearOnError relies on timing behavior. It sets InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING to 1s and relies on the fact that the stats request completes within that timeframe (which our ever-so-slow CI seems to violate at times). Unfortunately the logging has been misimplemented in InternalClusterInfoService, so the corresponding log messages showing that the requests have timed out are missing for this. The issue can be locally reproduced by reducing the timeout to something lower. 
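The underlying pitfall is that a timed CountDownLatch.await reports a timeout through its boolean return value rather than by throwing; InterruptedException is only raised if the waiting thread is interrupted. A warning logged solely from the catch block therefore never fires on a plain timeout, which is what this commit corrects. A minimal standalone illustration (editorial, not part of the patch):

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    static boolean awaitOrWarn(CountDownLatch latch, long timeoutMillis) {
        try {
            // await(timeout) returns false when the timeout elapses...
            if (latch.await(timeoutMillis, TimeUnit.MILLISECONDS) == false) {
                // ...so the timeout warning belongs in this branch
                return false;
            }
            return true;
        } catch (InterruptedException e) {
            // only reached on interruption, never on a plain timeout
            Thread.currentThread().interrupt(); // restore interrupt status, as the patch does
            return false;
        }
    }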
Closes #36554 --- .../cluster/InternalClusterInfoService.java | 10 ++++++---- .../elasticsearch/cluster/ClusterInfoServiceIT.java | 8 +++++++- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index 559b554b007..e8261ca9f09 100644 --- a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -345,17 +345,19 @@ public class InternalClusterInfoService implements ClusterInfoService, LocalNode }); try { - nodeLatch.await(fetchTimeout.getMillis(), TimeUnit.MILLISECONDS); + if (nodeLatch.await(fetchTimeout.getMillis(), TimeUnit.MILLISECONDS) == false) { + logger.warn("Failed to update node information for ClusterInfoUpdateJob within {} timeout", fetchTimeout); + } } catch (InterruptedException e) { Thread.currentThread().interrupt(); // restore interrupt status - logger.warn("Failed to update node information for ClusterInfoUpdateJob within {} timeout", fetchTimeout); } try { - indicesLatch.await(fetchTimeout.getMillis(), TimeUnit.MILLISECONDS); + if (indicesLatch.await(fetchTimeout.getMillis(), TimeUnit.MILLISECONDS) == false) { + logger.warn("Failed to update shard information for ClusterInfoUpdateJob within {} timeout", fetchTimeout); + } } catch (InterruptedException e) { Thread.currentThread().interrupt(); // restore interrupt status - logger.warn("Failed to update shard information for ClusterInfoUpdateJob within {} timeout", fetchTimeout); } ClusterInfo clusterInfo = getClusterInfo(); try { diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java b/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java index ae1caa787d4..99f89548524 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java @@ -111,7 +111,6 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { .put(super.nodeSettings(nodeOrdinal)) // manual collection or upon cluster forming. 
.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2) - .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), "1s") .build(); } @@ -120,6 +119,11 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { return Arrays.asList(TestPlugin.class, MockTransportService.TestPlugin.class); } + private void setClusterInfoTimeout(String timeValue) { + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), timeValue).build())); + } + public void testClusterInfoServiceCollectsInformation() throws Exception { internalCluster().startNodes(2); assertAcked(prepareCreate("test").setSettings(Settings.builder() @@ -204,6 +208,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { }); } + setClusterInfoTimeout("1s"); // timeouts shouldn't clear the info timeout.set(true); info = infoService.refresh(); @@ -237,6 +242,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { // check we recover blockingActionFilter.blockActions(); + setClusterInfoTimeout("15s"); info = infoService.refresh(); assertNotNull("info should not be null", info); assertThat(info.getNodeLeastAvailableDiskUsages().size(), equalTo(2)); From ad20d6bb836bb7a1c6a0438c36eb90de4bb92815 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Wed, 19 Dec 2018 13:06:24 +0000 Subject: [PATCH 15/20] [ML] Followup to annotations index creation (#36824) Fixes two minor problems reported after merge of #36731: 1. Name the creation method to make clear it only creates if necessary 2. Avoid multiple simultaneous in-flight creation requests --- .../core/ml/annotations/AnnotationIndex.java | 10 +++++----- .../xpack/ml/MlInitializationService.java | 15 ++++++++++++--- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/AnnotationIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/AnnotationIndex.java index 29a808c74ce..843be6596c3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/AnnotationIndex.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/AnnotationIndex.java @@ -38,12 +38,12 @@ public class AnnotationIndex { public static final String INDEX_NAME = ".ml-annotations-6"; /** - * Create the .ml-annotations index with correct mappings. - * This index is read and written by the UI results views, - * so needs to exist when there might be ML results to view. + * Create the .ml-annotations index with correct mappings if it does not already + * exist. This index is read and written by the UI results views, so needs to + * exist when there might be ML results to view. 
*/ - public static void createAnnotationsIndex(Settings settings, Client client, ClusterState state, - final ActionListener finalListener) { + public static void createAnnotationsIndexIfNecessary(Settings settings, Client client, ClusterState state, + final ActionListener finalListener) { final ActionListener createAliasListener = ActionListener.wrap(success -> { final IndicesAliasesRequest request = client.admin().indices().prepareAliases() diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java index 0722d213208..326081f545c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java @@ -19,6 +19,8 @@ import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.annotations.AnnotationIndex; +import java.util.concurrent.atomic.AtomicBoolean; + class MlInitializationService implements LocalNodeMasterListener, ClusterStateListener { private static final Logger logger = LogManager.getLogger(MlInitializationService.class); @@ -27,6 +29,7 @@ class MlInitializationService implements LocalNodeMasterListener, ClusterStateLi private final ThreadPool threadPool; private final ClusterService clusterService; private final Client client; + private final AtomicBoolean isIndexCreationInProgress = new AtomicBoolean(false); private volatile MlDailyMaintenanceService mlDailyMaintenanceService; @@ -55,14 +58,20 @@ class MlInitializationService implements LocalNodeMasterListener, ClusterStateLi return; } - if (event.localNodeMaster()) { - AnnotationIndex.createAnnotationsIndex(settings, client, event.state(), ActionListener.wrap( + // The atomic flag prevents multiple simultaneous attempts to create the + // index if there is a flurry of cluster state updates in quick succession + if (event.localNodeMaster() && isIndexCreationInProgress.compareAndSet(false, true)) { + AnnotationIndex.createAnnotationsIndexIfNecessary(settings, client, event.state(), ActionListener.wrap( r -> { + isIndexCreationInProgress.set(false); if (r) { logger.info("Created ML annotations index and aliases"); } }, - e -> logger.error("Error creating ML annotations index or aliases", e))); + e -> { + isIndexCreationInProgress.set(false); + logger.error("Error creating ML annotations index or aliases", e); + })); } } From f2a5373495f38aa9148c811732cf858eb5c19eeb Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 19 Dec 2018 08:24:10 -0500 Subject: [PATCH 16/20] Add the 6.7.0 constant to the master branch Now that the 6.x branch has been bumped to the 6.7.0 version, this commit adds knowledge of the 6.7.0 version to the master branch. 
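Judging from the constants in the diff below (V_6_6_0_ID = 6060099, V_6_7_0_ID = 6070099, V_7_0_0_ID = 7000099), the numeric id appears to pack the major, minor and revision digits plus a two-digit suffix. An illustrative decoding, inferred from those values rather than taken from documented behaviour (editorial, not part of the patch):

    int id = 6_070_099;                 // V_6_7_0_ID from this commit
    int major    = id / 1_000_000;      // 6
    int minor    = (id / 10_000) % 100; // 7
    int revision = (id / 100) % 100;    // 0
    int suffix   = id % 100;            // 99, common to all the constants shown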
--- server/src/main/java/org/elasticsearch/Version.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index fec82dd12b3..07df1b646d5 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -118,6 +118,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_5_4 = new Version(V_6_5_4_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); public static final int V_6_6_0_ID = 6060099; public static final Version V_6_6_0 = new Version(V_6_6_0_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); + public static final int V_6_7_0_ID = 6070099; + public static final Version V_6_7_0 = new Version(V_6_7_0_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); public static final int V_7_0_0_ID = 7000099; public static final Version V_7_0_0 = new Version(V_7_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version CURRENT = V_7_0_0; @@ -136,6 +138,8 @@ public class Version implements Comparable, ToXContentFragment { switch (id) { case V_7_0_0_ID: return V_7_0_0; + case V_6_7_0_ID: + return V_6_7_0; case V_6_6_0_ID: return V_6_6_0; case V_6_5_4_ID: From d43cbdab9793587daf30256fd33ba4f2a1d6b7d2 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 19 Dec 2018 13:43:43 +0000 Subject: [PATCH 17/20] [ML] ensure the ml-config index (#36792) (#36832) --- .../persistence/AnomalyDetectorsIndex.java | 2 + .../xpack/ml/MachineLearning.java | 4 +- .../ml/MlConfigMigrationEligibilityCheck.java | 18 +++ .../xpack/ml/MlConfigMigrator.java | 78 +++++++++---- .../persistence/DatafeedConfigProvider.java | 23 ++-- .../ml/job/persistence/JobConfigProvider.java | 36 ++++-- .../autodetect/AutodetectProcessManager.java | 5 +- ...lConfigMigrationEligibilityCheckTests.java | 106 +++++++++++++++--- .../ml/integration/MlConfigMigratorIT.java | 69 +++++++++++- .../xpack/ml/job/JobManagerTests.java | 34 +++++- 10 files changed, 305 insertions(+), 70 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java index b7b104e35cd..673e796ef7e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java @@ -10,6 +10,8 @@ package org.elasticsearch.xpack.core.ml.job.persistence; */ public final class AnomalyDetectorsIndex { + public static final int CONFIG_INDEX_MAX_RESULTS_WINDOW = 10_000; + private AnomalyDetectorsIndex() { } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 65147abf20b..5310a92fc20 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -670,7 +670,9 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu // least possible burden on Elasticsearch .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, "0-1") - .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayedNodeTimeOutSetting)) + 
.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayedNodeTimeOutSetting) + .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), + AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW)) .version(Version.CURRENT.id) .putMapping(ElasticsearchMappings.DOC_TYPE, Strings.toString(configMapping)) .build(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheck.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheck.java index 0f127919ac3..72cb52424c3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheck.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheck.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.ml; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -14,6 +15,7 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; /** * Checks whether migration can start and whether ML resources (e.g. jobs, datafeeds) @@ -37,10 +39,12 @@ public class MlConfigMigrationEligibilityCheck { this.isConfigMigrationEnabled = configMigrationEnabled; } + /** * Can migration start? Returns: * False if config migration is disabled via the setting {@link #ENABLE_CONFIG_MIGRATION} * False if the min node version of the cluster is before {@link #MIN_NODE_VERSION} + * False if the .ml-config index shards are not active * True otherwise * @param clusterState The cluster state * @return A boolean that dictates if config migration can start @@ -54,12 +58,26 @@ public class MlConfigMigrationEligibilityCheck { if (minNodeVersion.before(MIN_NODE_VERSION)) { return false; } + + return mlConfigIndexIsAllocated(clusterState); + } + + static boolean mlConfigIndexIsAllocated(ClusterState clusterState) { + if (clusterState.metaData().hasIndex(AnomalyDetectorsIndex.configIndexName()) == false) { + return false; + } + + IndexRoutingTable routingTable = clusterState.getRoutingTable().index(AnomalyDetectorsIndex.configIndexName()); + if (routingTable == null || routingTable.allPrimaryShardsActive() == false) { + return false; + } return true; } /** * Is the job eligible for migration? Returns: * False if {@link #canStartMigration(ClusterState)} returns {@code false} + * False if the job is not in the cluster state * False if the {@link Job#isDeleting()} * False if the job has a persistent task * True otherwise i.e.
the job is present, not deleting diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java index c3b9626ffd0..f2fe80f3776 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java @@ -11,6 +11,8 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; @@ -21,6 +23,7 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; @@ -29,6 +32,7 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -126,19 +130,11 @@ public class MlConfigMigrator { * @param listener The success listener */ public void migrateConfigsWithoutTasks(ClusterState clusterState, ActionListener listener) { - - if (migrationEligibilityCheck.canStartMigration(clusterState) == false) { - listener.onResponse(false); - return; - } - if (migrationInProgress.compareAndSet(false, true) == false) { listener.onResponse(Boolean.FALSE); return; } - logger.debug("migrating ml configurations"); - ActionListener unMarkMigrationInProgress = ActionListener.wrap( response -> { migrationInProgress.set(false); @@ -150,19 +146,34 @@ public class MlConfigMigrator { } ); - snapshotMlMeta(MlMetadata.getMlMetadata(clusterState), ActionListener.wrap( - response -> { - // We have successfully snapshotted the ML configs so we don't need to try again - tookConfigSnapshot.set(true); + List batches = splitInBatches(clusterState); + if (batches.isEmpty()) { + unMarkMigrationInProgress.onResponse(Boolean.FALSE); + return; + } - List batches = splitInBatches(clusterState); - if (batches.isEmpty()) { - unMarkMigrationInProgress.onResponse(Boolean.FALSE); - return; - } - migrateBatches(batches, unMarkMigrationInProgress); - }, - unMarkMigrationInProgress::onFailure + if (clusterState.metaData().hasIndex(AnomalyDetectorsIndex.configIndexName()) == false) { + createConfigIndex(ActionListener.wrap( + response -> { + unMarkMigrationInProgress.onResponse(Boolean.FALSE); + }, + unMarkMigrationInProgress::onFailure + )); + return; + } + + if (migrationEligibilityCheck.canStartMigration(clusterState) == false) { + unMarkMigrationInProgress.onResponse(Boolean.FALSE); + return; + } + + snapshotMlMeta(MlMetadata.getMlMetadata(clusterState), ActionListener.wrap( + 
response -> { + // We have successfully snapshotted the ML configs so we don't need to try again + tookConfigSnapshot.set(true); + migrateBatches(batches, unMarkMigrationInProgress); + }, + unMarkMigrationInProgress::onFailure )); } @@ -296,6 +307,7 @@ public class MlConfigMigrator { private void addJobIndexRequests(Collection jobs, BulkRequestBuilder bulkRequestBuilder) { ToXContent.Params params = new ToXContent.MapParams(JobConfigProvider.TO_XCONTENT_PARAMS); for (Job job : jobs) { + logger.debug("adding job to migrate: " + job.getId()); bulkRequestBuilder.add(indexRequest(job, Job.documentId(job.getId()), params)); } } @@ -303,6 +315,7 @@ private void addDatafeedIndexRequests(Collection datafeedConfigs, BulkRequestBuilder bulkRequestBuilder) { ToXContent.Params params = new ToXContent.MapParams(DatafeedConfigProvider.TO_XCONTENT_PARAMS); for (DatafeedConfig datafeedConfig : datafeedConfigs) { + logger.debug("adding datafeed to migrate: " + datafeedConfig.getId()); bulkRequestBuilder.add(indexRequest(datafeedConfig, DatafeedConfig.documentId(datafeedConfig.getId()), params)); } } @@ -318,7 +331,6 @@ return indexRequest; } - // public for testing public void snapshotMlMeta(MlMetadata mlMetadata, ActionListener listener) { @@ -361,6 +373,30 @@ ); } + private void createConfigIndex(ActionListener listener) { + logger.info("creating the .ml-config index"); + CreateIndexRequest createIndexRequest = new CreateIndexRequest(AnomalyDetectorsIndex.configIndexName()); + try + { + createIndexRequest.settings( + Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, "0-1") + .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW) + ); + createIndexRequest.mapping(ElasticsearchMappings.DOC_TYPE, ElasticsearchMappings.configMapping()); + } catch (Exception e) { + logger.error("error writing the .ml-config mappings", e); + listener.onFailure(e); + return; + } + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, createIndexRequest, + ActionListener.wrap( + r -> listener.onResponse(r.isAcknowledged()), + listener::onFailure + ), client.admin().indices()::create); + } public static Job updateJobForMigration(Job job) { Job.Builder builder = new Job.Builder(job); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java index 5367278be9a..d9ea6cb7c32 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java @@ -73,6 +73,15 @@ import java.util.stream.Collectors; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +/** + * This class implements CRUD operations for the + * datafeed configuration document + * + * The number of datafeeds returned in a search is limited to + * {@link AnomalyDetectorsIndex#CONFIG_INDEX_MAX_RESULTS_WINDOW}. + * In most cases we expect 10s or 100s of datafeeds to be defined and + * a search for all datafeeds should return all.
+ */ public class DatafeedConfigProvider { private static final Logger logger = LogManager.getLogger(DatafeedConfigProvider.class); @@ -87,13 +96,6 @@ public class DatafeedConfigProvider { TO_XCONTENT_PARAMS = Collections.unmodifiableMap(modifiable); } - /** - * In most cases we expect 10s or 100s of datafeeds to be defined and - * a search for all datafeeds should return all. - * TODO this is a temporary fix - */ - public int searchSize = 1000; - public DatafeedConfigProvider(Client client, NamedXContentRegistry xContentRegistry) { this.client = client; this.xContentRegistry = xContentRegistry; @@ -368,7 +370,9 @@ public class DatafeedConfigProvider { SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) .setIndicesOptions(IndicesOptions.lenientExpandOpen()) - .setSource(sourceBuilder).request(); + .setSource(sourceBuilder) + .setSize(AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW) + .request(); ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(tokens, allowNoDatafeeds); @@ -407,7 +411,6 @@ public class DatafeedConfigProvider { * wildcard then setting this true will not suppress the exception * @param listener The expanded datafeed config listener */ - // NORELEASE datafeed configs should be paged or have a mechanism to return all jobs if there are many of them public void expandDatafeedConfigs(String expression, boolean allowNoDatafeeds, ActionListener listener) { String [] tokens = ExpandedIdsMatcher.tokenizeExpression(expression); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(buildDatafeedIdQuery(tokens)); @@ -416,7 +419,7 @@ public class DatafeedConfigProvider { SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) .setIndicesOptions(IndicesOptions.lenientExpandOpen()) .setSource(sourceBuilder) - .setSize(searchSize) + .setSize(AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW) .request(); ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(tokens, allowNoDatafeeds); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java index 49f82bba033..73b1fe155fc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java @@ -89,6 +89,11 @@ import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; /** * This class implements CRUD operations for the * anomaly detector job configuration document + * + * The number of jobs returned in a search is limited to + * {@link AnomalyDetectorsIndex#CONFIG_INDEX_MAX_RESULTS_WINDOW}. + * In most cases we expect 10s or 100s of jobs to be defined and + * a search for all jobs should return all.
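The jobs provider receives the same treatment as the datafeeds provider, so the expand-style searches in both classes converge on one shape. A condensed sketch of that shared shape, pulled together from the scattered hunks (the wrapper method and its parameters are illustrative; the identifiers are the ones used in the diffs):

    import org.elasticsearch.action.search.SearchRequest;
    import org.elasticsearch.action.support.IndicesOptions;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.index.query.QueryBuilder;
    import org.elasticsearch.search.builder.SearchSourceBuilder;

    class ExpandSearchSketch {
        // One capped, sorted search replaces the old searchSize = 1000 shortcut.
        static SearchRequest expandSearch(Client client, QueryBuilder query, String idField, int window) {
            SearchSourceBuilder sourceBuilder = new SearchSourceBuilder()
                    .query(query)       // buildQuery(...) or buildDatafeedIdQuery(...)
                    .sort(idField);     // stable order for ExpandedIdsMatcher
            return client.prepareSearch(".ml-config")
                    .setIndicesOptions(IndicesOptions.lenientExpandOpen())
                    .setSource(sourceBuilder)
                    .setSize(window)    // AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW
                    .request();
        }
    }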
- * TODO this is a temporary fix + */ - private int searchSize = 1000; - private final Client client; public JobConfigProvider(Client client) { @@ -565,7 +563,7 @@ public class JobConfigProvider { SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) .setIndicesOptions(IndicesOptions.lenientExpandOpen()) .setSource(sourceBuilder) - .setSize(searchSize) + .setSize(AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW) .request(); ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(tokens, allowNoJobs); @@ -599,6 +597,21 @@ public class JobConfigProvider { } + private SearchRequest makeExpandIdsSearchRequest(String expression, boolean excludeDeleting) { + String [] tokens = ExpandedIdsMatcher.tokenizeExpression(expression); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(buildQuery(tokens, excludeDeleting)); + sourceBuilder.sort(Job.ID.getPreferredName()); + sourceBuilder.fetchSource(false); + sourceBuilder.docValueField(Job.ID.getPreferredName(), DocValueFieldsContext.USE_DEFAULT_FORMAT); + sourceBuilder.docValueField(Job.GROUPS.getPreferredName(), DocValueFieldsContext.USE_DEFAULT_FORMAT); + + return client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) + .setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .setSource(sourceBuilder) + .setSize(AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW) + .request(); + } + /** * The same logic as {@link #expandJobsIds(String, boolean, boolean, ActionListener)} but * the full anomaly detector job configuration is returned. @@ -612,7 +625,6 @@ public class JobConfigProvider { * @param excludeDeleting If true exclude jobs marked as deleting * @param listener The expanded jobs listener */ - // NORELEASE jobs should be paged or have a mechanism to return all jobs if there are many of them public void expandJobs(String expression, boolean allowNoJobs, boolean excludeDeleting, ActionListener listener) { String [] tokens = ExpandedIdsMatcher.tokenizeExpression(expression); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(buildQuery(tokens, excludeDeleting)); @@ -621,7 +633,7 @@ public class JobConfigProvider { SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) .setIndicesOptions(IndicesOptions.lenientExpandOpen()) .setSource(sourceBuilder) - .setSize(searchSize) + .setSize(AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW) .request(); ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(tokens, allowNoJobs); @@ -679,7 +691,7 @@ public class JobConfigProvider { SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) .setIndicesOptions(IndicesOptions.lenientExpandOpen()) .setSource(sourceBuilder) - .setSize(searchSize) + .setSize(AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW) .request(); executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, @@ -741,7 +753,7 @@ public class JobConfigProvider { SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) .setIndicesOptions(IndicesOptions.lenientExpandOpen()) .setSource(sourceBuilder) - .setSize(searchSize) + .setSize(AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW) .request(); executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index ff946f09cac..a1c8cadce60 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -45,13 +45,12 @@ import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.action.TransportOpenJobAction.JobTask; import org.elasticsearch.xpack.ml.job.JobManager; import org.elasticsearch.xpack.ml.job.persistence.JobDataCountsPersister; -import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.job.persistence.JobRenormalizedResultsPersister; import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister; +import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.job.persistence.ScheduledEventsQueryBuilder; import org.elasticsearch.xpack.ml.job.persistence.StateStreamer; import org.elasticsearch.xpack.ml.job.process.DataCountsReporter; -import org.elasticsearch.xpack.ml.process.NativeStorageProvider; import org.elasticsearch.xpack.ml.job.process.autodetect.output.AutoDetectResultProcessor; import org.elasticsearch.xpack.ml.job.process.autodetect.params.AutodetectParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams; @@ -62,6 +61,7 @@ import org.elasticsearch.xpack.ml.job.process.normalizer.Renormalizer; import org.elasticsearch.xpack.ml.job.process.normalizer.ScoresUpdater; import org.elasticsearch.xpack.ml.job.process.normalizer.ShortCircuitingRenormalizer; import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.process.NativeStorageProvider; import java.io.IOException; import java.io.InputStream; @@ -408,7 +408,6 @@ public class AutodetectProcessManager { logger.info("Opening job [{}]", jobId); jobManager.getJob(jobId, ActionListener.wrap( - // NORELEASE JIndex. 
Should not be doing this work on the network thread job -> { if (job.getJobVersion() == null) { closeHandler.accept(ExceptionsHelper.badRequestException("Cannot open job [" + jobId diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheckTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheckTests.java index fec071c4641..4785f9f75a5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheckTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheckTests.java @@ -8,13 +8,22 @@ package org.elasticsearch.xpack.ml; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.MlMetadata; @@ -24,6 +33,7 @@ import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobTests; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.junit.Before; import java.net.InetAddress; @@ -53,12 +63,18 @@ public class MlConfigMigrationEligibilityCheckTests extends ESTestCase { } public void testCanStartMigration_givenNodesNotUpToVersion() { + MetaData.Builder metaData = MetaData.builder(); + RoutingTable.Builder routingTable = RoutingTable.builder(); + addMlConfigIndex(metaData, routingTable); + // mixed 6.5 and 6.6 nodes ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .nodes(DiscoveryNodes.builder() - .add(new DiscoveryNode("node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.V_6_5_0)) - .add(new DiscoveryNode("node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), Version.V_6_6_0))) - .build(); + .nodes(DiscoveryNodes.builder() + .add(new DiscoveryNode("node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.V_6_5_0)) + .add(new DiscoveryNode("node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), Version.V_6_6_0))) + .routingTable(routingTable.build()) + .metaData(metaData) + .build(); Settings settings = newSettings(true); givenClusterSettings(settings); @@ -69,12 +85,18 @@ public class MlConfigMigrationEligibilityCheckTests extends ESTestCase { } public void 
testCanStartMigration_givenNodesNotUpToVersionAndMigrationIsEnabled() { + MetaData.Builder metaData = MetaData.builder(); + RoutingTable.Builder routingTable = RoutingTable.builder(); + addMlConfigIndex(metaData, routingTable); + // mixed 6.5 and 6.6 nodes ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .nodes(DiscoveryNodes.builder() - .add(new DiscoveryNode("node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.V_6_6_0)) - .add(new DiscoveryNode("node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), Version.V_6_6_0))) - .build(); + .nodes(DiscoveryNodes.builder() + .add(new DiscoveryNode("node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.V_6_6_0)) + .add(new DiscoveryNode("node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), Version.V_6_6_0))) + .routingTable(routingTable.build()) + .metaData(metaData) + .build(); Settings settings = newSettings(true); givenClusterSettings(settings); @@ -84,6 +106,52 @@ public class MlConfigMigrationEligibilityCheckTests extends ESTestCase { assertTrue(check.canStartMigration(clusterState)); } + public void testCanStartMigration_givenMissingIndex() { + Settings settings = newSettings(true); + givenClusterSettings(settings); + + ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests")) + .build(); + + MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService); + assertFalse(check.canStartMigration(clusterState)); + } + + public void testCanStartMigration_givenInactiveShards() { + Settings settings = newSettings(true); + givenClusterSettings(settings); + + // index is present but no routing + MetaData.Builder metaData = MetaData.builder(); + RoutingTable.Builder routingTable = RoutingTable.builder(); + addMlConfigIndex(metaData, routingTable); + ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests")) + .metaData(metaData) + .build(); + + MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService); + assertFalse(check.canStartMigration(clusterState)); + } + + private void addMlConfigIndex(MetaData.Builder metaData, RoutingTable.Builder routingTable) { + IndexMetaData.Builder indexMetaData = IndexMetaData.builder(AnomalyDetectorsIndex.configIndexName()); + indexMetaData.settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + ); + metaData.put(indexMetaData); + Index index = new Index(AnomalyDetectorsIndex.configIndexName(), "_uuid"); + ShardId shardId = new ShardId(index, 0); + ShardRouting shardRouting = ShardRouting.newUnassigned(shardId, true, RecoverySource.EmptyStoreRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")); + shardRouting = shardRouting.initialize("node_id", null, 0L); + shardRouting = shardRouting.moveToStarted(); + routingTable.add(IndexRoutingTable.builder(index) + .addIndexShard(new IndexShardRoutingTable.Builder(shardId).addShard(shardRouting).build())); + } + + public void testJobIsEligibleForMigration_givenNodesNotUpToVersion() { // mixed 6.5 and 6.6 nodes ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) @@ -185,11 +253,14 @@ public class MlConfigMigrationEligibilityCheckTests extends ESTestCase { Job closedJob = JobTests.buildJobBuilder("closed-job").build(); 
MlMetadata.Builder mlMetadata = new MlMetadata.Builder().putJob(closedJob, false); + MetaData.Builder metaData = MetaData.builder(); + RoutingTable.Builder routingTable = RoutingTable.builder(); + addMlConfigIndex(metaData, routingTable); + ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests")) - .metaData(MetaData.builder() - .putCustom(MlMetadata.TYPE, mlMetadata.build()) - ) - .build(); + .metaData(metaData.putCustom(MlMetadata.TYPE, mlMetadata.build())) + .routingTable(routingTable.build()) + .build(); Settings settings = newSettings(true); givenClusterSettings(settings); @@ -283,11 +354,14 @@ public class MlConfigMigrationEligibilityCheckTests extends ESTestCase { mlMetadata.putDatafeed(createCompatibleDatafeed(job.getId()), Collections.emptyMap()); String datafeedId = "df-" + job.getId(); + MetaData.Builder metaData = MetaData.builder(); + RoutingTable.Builder routingTable = RoutingTable.builder(); + addMlConfigIndex(metaData, routingTable); + ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests")) - .metaData(MetaData.builder() - .putCustom(MlMetadata.TYPE, mlMetadata.build()) - ) - .build(); + .metaData(metaData.putCustom(MlMetadata.TYPE, mlMetadata.build())) + .routingTable(routingTable.build()) + .build(); Settings settings = newSettings(true); givenClusterSettings(settings); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlConfigMigratorIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlConfigMigratorIT.java index d98abea5553..87c0e4ac824 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlConfigMigratorIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlConfigMigratorIT.java @@ -5,12 +5,20 @@ */ package org.elasticsearch.xpack.ml.integration; +import org.elasticsearch.Version; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -19,6 +27,8 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.Job; @@ -117,9 +127,12 @@ public class MlConfigMigratorIT extends MlSingleNodeTestCase { builder.setIndices(Collections.singletonList("beats*")); mlMetadata.putDatafeed(builder.build(), Collections.emptyMap()); + MetaData.Builder metaData = MetaData.builder(); 
+ RoutingTable.Builder routingTable = RoutingTable.builder(); + addMlConfigIndex(metaData, routingTable); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .metaData(MetaData.builder() - .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .metaData(metaData.putCustom(MlMetadata.TYPE, mlMetadata.build())) + .routingTable(routingTable.build()) .build(); doAnswer(invocation -> { @@ -180,10 +193,13 @@ public class MlConfigMigratorIT extends MlSingleNodeTestCase { mlMetadata.putDatafeed(builder.build(), Collections.emptyMap()); } + MetaData.Builder metaData = MetaData.builder(); + RoutingTable.Builder routingTable = RoutingTable.builder(); + addMlConfigIndex(metaData, routingTable); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .metaData(MetaData.builder() - .putCustom(MlMetadata.TYPE, mlMetadata.build())) - .build(); + .metaData(metaData.putCustom(MlMetadata.TYPE, mlMetadata.build())) + .routingTable(routingTable.build()) + .build(); doAnswer(invocation -> { ClusterStateUpdateTask listener = (ClusterStateUpdateTask) invocation.getArguments()[1]; @@ -304,6 +320,49 @@ public class MlConfigMigratorIT extends MlSingleNodeTestCase { assertEquals(expectedMlMetadata, recoveredMeta); } } + + private void addMlConfigIndex(MetaData.Builder metaData, RoutingTable.Builder routingTable) { + IndexMetaData.Builder indexMetaData = IndexMetaData.builder(AnomalyDetectorsIndex.configIndexName()); + indexMetaData.settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + ); + metaData.put(indexMetaData); + Index index = new Index(AnomalyDetectorsIndex.configIndexName(), "_uuid"); + ShardId shardId = new ShardId(index, 0); + ShardRouting shardRouting = ShardRouting.newUnassigned(shardId, true, RecoverySource.EmptyStoreRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")); + shardRouting = shardRouting.initialize("node_id", null, 0L); + shardRouting = shardRouting.moveToStarted(); + routingTable.add(IndexRoutingTable.builder(index) + .addIndexShard(new IndexShardRoutingTable.Builder(shardId).addShard(shardRouting).build())); + } + + public void testConfigIndexIsCreated() throws Exception { + // and jobs and datafeeds clusterstate + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + mlMetadata.putJob(buildJobBuilder("job-foo").build(), false); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder().putCustom(MlMetadata.TYPE, mlMetadata.build())) + .build(); + + AtomicReference exceptionHolder = new AtomicReference<>(); + AtomicReference responseHolder = new AtomicReference<>(); + MlConfigMigrator mlConfigMigrator = new MlConfigMigrator(nodeSettings(), client(), clusterService); + + // if the cluster state has a job config and the index does not + // exist it should be created + blockingCall(actionListener -> mlConfigMigrator.migrateConfigsWithoutTasks(clusterState, actionListener), + responseHolder, exceptionHolder); + + assertBusy(() -> assertTrue(configIndexExists())); + } + + private boolean configIndexExists() { + return client().admin().indices().prepareExists(AnomalyDetectorsIndex.configIndexName()).get().isExists(); + } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java index 
a24950ed091..c52a5a592d8 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java @@ -8,13 +8,21 @@ package org.elasticsearch.xpack.ml.job; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; @@ -25,7 +33,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.Index; import org.elasticsearch.index.analysis.AnalysisRegistry; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -448,9 +458,29 @@ public class JobManagerTests extends ESTestCase { public void testUpdateJob_notAllowedPreMigration() { MlMetadata.Builder mlmetadata = new MlMetadata.Builder().putJob(buildJobBuilder("closed-job-not-migrated").build(), false); + + MetaData.Builder metaData = MetaData.builder(); + RoutingTable.Builder routingTable = RoutingTable.builder(); + + IndexMetaData.Builder indexMetaData = IndexMetaData.builder(AnomalyDetectorsIndex.configIndexName()); + indexMetaData.settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + ); + metaData.put(indexMetaData); + Index index = new Index(AnomalyDetectorsIndex.configIndexName(), "_uuid"); + ShardId shardId = new ShardId(index, 0); + ShardRouting shardRouting = ShardRouting.newUnassigned(shardId, true, RecoverySource.EmptyStoreRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")); + shardRouting = shardRouting.initialize("node_id", null, 0L); + shardRouting = shardRouting.moveToStarted(); + routingTable.add(IndexRoutingTable.builder(index) + .addIndexShard(new IndexShardRoutingTable.Builder(shardId).addShard(shardRouting).build())); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .metaData(MetaData.builder() - .putCustom(MlMetadata.TYPE, mlmetadata.build())) + .metaData(metaData.putCustom(MlMetadata.TYPE, mlmetadata.build())) + .routingTable(routingTable.build()) .build(); when(clusterService.state()).thenReturn(clusterState); From 
a34a3532ce135691f858a09c11206f73c8ed4da2 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Wed, 19 Dec 2018 15:29:09 +0100 Subject: [PATCH 18/20] [Javadoc]: Remove lucene tags (#36834) Remove lucene tags as they break gradle javadoc task Relates #36794 --- .../src/main/java/org/apache/lucene/document/XLatLonShape.java | 1 - .../org/apache/lucene/document/XLatLonShapeBoundingBoxQuery.java | 1 - .../java/org/apache/lucene/document/XLatLonShapeLineQuery.java | 1 - .../org/apache/lucene/document/XLatLonShapePolygonQuery.java | 1 - .../main/java/org/apache/lucene/document/XLatLonShapeQuery.java | 1 - server/src/main/java/org/apache/lucene/geo/XRectangle2D.java | 1 - server/src/main/java/org/apache/lucene/geo/XTessellator.java | 1 - 7 files changed, 7 deletions(-) diff --git a/server/src/main/java/org/apache/lucene/document/XLatLonShape.java b/server/src/main/java/org/apache/lucene/document/XLatLonShape.java index 4fd92f4508c..87ebb6a753f 100644 --- a/server/src/main/java/org/apache/lucene/document/XLatLonShape.java +++ b/server/src/main/java/org/apache/lucene/document/XLatLonShape.java @@ -52,7 +52,6 @@ import static org.apache.lucene.geo.GeoEncodingUtils.encodeLongitude; * @see PointValues * @see LatLonDocValuesField * - * @lucene.experimental */ public class XLatLonShape { public static final int BYTES = LatLonPoint.BYTES; diff --git a/server/src/main/java/org/apache/lucene/document/XLatLonShapeBoundingBoxQuery.java b/server/src/main/java/org/apache/lucene/document/XLatLonShapeBoundingBoxQuery.java index bcf664719b7..8e30302225f 100644 --- a/server/src/main/java/org/apache/lucene/document/XLatLonShapeBoundingBoxQuery.java +++ b/server/src/main/java/org/apache/lucene/document/XLatLonShapeBoundingBoxQuery.java @@ -26,7 +26,6 @@ import org.apache.lucene.index.PointValues.Relation; *
<p>
The field must be indexed using * {@link XLatLonShape#createIndexableFields} added per document. * - * @lucene.experimental **/ final class XLatLonShapeBoundingBoxQuery extends XLatLonShapeQuery { final XRectangle2D rectangle2D; diff --git a/server/src/main/java/org/apache/lucene/document/XLatLonShapeLineQuery.java b/server/src/main/java/org/apache/lucene/document/XLatLonShapeLineQuery.java index 90905e0d83f..4bbb077303e 100644 --- a/server/src/main/java/org/apache/lucene/document/XLatLonShapeLineQuery.java +++ b/server/src/main/java/org/apache/lucene/document/XLatLonShapeLineQuery.java @@ -41,7 +41,6 @@ import java.util.Arrays; *
<p>
The field must be indexed using * {@link XLatLonShape#createIndexableFields} added per document. * - * @lucene.experimental **/ final class XLatLonShapeLineQuery extends XLatLonShapeQuery { final Line[] lines; diff --git a/server/src/main/java/org/apache/lucene/document/XLatLonShapePolygonQuery.java b/server/src/main/java/org/apache/lucene/document/XLatLonShapePolygonQuery.java index 5e97828aae2..5b67d8c0bc9 100644 --- a/server/src/main/java/org/apache/lucene/document/XLatLonShapePolygonQuery.java +++ b/server/src/main/java/org/apache/lucene/document/XLatLonShapePolygonQuery.java @@ -31,7 +31,6 @@ import java.util.Arrays; *
<p>
The field must be indexed using * {@link XLatLonShape#createIndexableFields} added per document. * - * @lucene.experimental **/ final class XLatLonShapePolygonQuery extends XLatLonShapeQuery { final Polygon[] polygons; diff --git a/server/src/main/java/org/apache/lucene/document/XLatLonShapeQuery.java b/server/src/main/java/org/apache/lucene/document/XLatLonShapeQuery.java index 7aded1337e4..f4c67872cdc 100644 --- a/server/src/main/java/org/apache/lucene/document/XLatLonShapeQuery.java +++ b/server/src/main/java/org/apache/lucene/document/XLatLonShapeQuery.java @@ -45,7 +45,6 @@ import java.util.Objects; * * Note: this class implements the majority of the INTERSECTS, WITHIN, DISJOINT relation logic * - * @lucene.experimental **/ abstract class XLatLonShapeQuery extends Query { /** field name */ diff --git a/server/src/main/java/org/apache/lucene/geo/XRectangle2D.java b/server/src/main/java/org/apache/lucene/geo/XRectangle2D.java index af06d0a0e39..0267ba29b86 100644 --- a/server/src/main/java/org/apache/lucene/geo/XRectangle2D.java +++ b/server/src/main/java/org/apache/lucene/geo/XRectangle2D.java @@ -38,7 +38,6 @@ import static org.apache.lucene.geo.GeoUtils.orient; /** * 2D rectangle implementation containing spatial logic. * - * @lucene.internal */ public class XRectangle2D { final byte[] bbox; diff --git a/server/src/main/java/org/apache/lucene/geo/XTessellator.java b/server/src/main/java/org/apache/lucene/geo/XTessellator.java index 416b501202b..48091439ba9 100644 --- a/server/src/main/java/org/apache/lucene/geo/XTessellator.java +++ b/server/src/main/java/org/apache/lucene/geo/XTessellator.java @@ -64,7 +64,6 @@ import static org.apache.lucene.geo.GeoUtils.orient; * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF * THIS SOFTWARE. * - * @lucene.experimental */ public final class XTessellator { // this is a dumb heuristic to control whether we cut over to sorted morton values From d31eaf731393fafc35beac4867edb2f562a4f816 Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Wed, 19 Dec 2018 16:36:16 +0200 Subject: [PATCH 19/20] SQL: protocol returns ISO 8601 String formatted dates instead of Long for JDBC/ODBC requests (#36800) * Change the way the protocol returns date fields from Long values in case of JDBC/ODBC, to ISO 8601 with millis String. --- .../xpack/sql/jdbc/JdbcDateUtils.java | 81 +++++++++++++++++++ .../xpack/sql/jdbc/JdbcResultSet.java | 6 +- .../xpack/sql/jdbc/TypeConverter.java | 18 +---- .../sql/qa/jdbc/JdbcIntegrationTestCase.java | 1 + .../xpack/sql/qa/jdbc/JdbcTestUtils.java | 9 +++ .../xpack/sql/qa/jdbc/ResultSetTestCase.java | 31 +++---- .../xpack/sql/action/SqlQueryResponse.java | 11 +-- 7 files changed, 118 insertions(+), 39 deletions(-) create mode 100644 x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDateUtils.java diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDateUtils.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDateUtils.java new file mode 100644 index 00000000000..8fbef88dca5 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDateUtils.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.jdbc; + +import java.sql.Date; +import java.sql.Time; +import java.sql.Timestamp; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeFormatterBuilder; +import java.util.Locale; +import java.util.function.Function; + +import static java.time.format.DateTimeFormatter.ISO_LOCAL_DATE; +import static java.time.temporal.ChronoField.HOUR_OF_DAY; +import static java.time.temporal.ChronoField.MILLI_OF_SECOND; +import static java.time.temporal.ChronoField.MINUTE_OF_HOUR; +import static java.time.temporal.ChronoField.SECOND_OF_MINUTE; + +/** + * JDBC-specific datetime utility methods. Because of lack of visibility, this class borrows code + * from {@code org.elasticsearch.xpack.sql.util.DateUtils} and {@code org.elasticsearch.xpack.sql.proto.StringUtils}. + */ +final class JdbcDateUtils { + + private static final long DAY_IN_MILLIS = 60 * 60 * 24 * 1000; + + static final DateTimeFormatter ISO_WITH_MILLIS = new DateTimeFormatterBuilder() + .parseCaseInsensitive() + .append(ISO_LOCAL_DATE) + .appendLiteral('T') + .appendValue(HOUR_OF_DAY, 2) + .appendLiteral(':') + .appendValue(MINUTE_OF_HOUR, 2) + .appendLiteral(':') + .appendValue(SECOND_OF_MINUTE, 2) + .appendFraction(MILLI_OF_SECOND, 3, 3, true) + .appendOffsetId() + .toFormatter(Locale.ROOT); + + static long asMillisSinceEpoch(String date) { + ZonedDateTime zdt = ISO_WITH_MILLIS.parse(date, ZonedDateTime::from); + return zdt.toInstant().toEpochMilli(); + } + + static Date asDate(String date) { + return new Date(utcMillisRemoveTime(asMillisSinceEpoch(date))); + } + + static Time asTime(String date) { + return new Time(utcMillisRemoveDate(asMillisSinceEpoch(date))); + } + + static Timestamp asTimestamp(String date) { + return new Timestamp(asMillisSinceEpoch(date)); + } + + /* + * Handles the value received as parameter: either a String (a ZonedDateTime formatted in the ISO 8601 standard with millis, + * which is how date fields are returned) or a Long value, as returned for histograms. + */ + static <R> R asDateTimeField(Object value, Function<String, R> asDateTimeMethod, Function<Long, R> ctor) { + if (value instanceof String) { + return asDateTimeMethod.apply((String) value); + } else { + return ctor.apply(((Number) value).longValue()); + } + } + + private static long utcMillisRemoveTime(long l) { + return l - (l % DAY_IN_MILLIS); + } + + private static long utcMillisRemoveDate(long l) { + return l % DAY_IN_MILLIS; + } +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java index 4ef9ab4c829..d089a99b0ee 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java @@ -30,6 +30,7 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.function.Function; import static java.lang.String.format; @@ -248,7 +249,10 @@ class JdbcResultSet implements ResultSet, JdbcWrapper { // the cursor can return an Integer if the date-since-epoch is small enough, XContentParser (Jackson) will // return the "smallest" data type for numbers when parsing // TODO: this should probably be handled server side - return val == null ?
null : ((Number) val).longValue(); + if (val == null) { + return null; + } + return JdbcDateUtils.asDateTimeField(val, JdbcDateUtils::asMillisSinceEpoch, Function.identity()); + }; return val == null ? null : (Long) val; } catch (ClassCastException cce) { diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java index a287d77191c..80f00ea3bbe 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java @@ -48,8 +48,6 @@ final class TypeConverter { private TypeConverter() {} - private static final long DAY_IN_MILLIS = 60 * 60 * 24 * 1000; - /** * Converts milliseconds after epoch to date */ @@ -216,7 +214,7 @@ final class TypeConverter { case FLOAT: return floatValue(v); // Float might be represented as string for infinity and NaN values case DATE: - return new Timestamp(((Number) v).longValue()); + return JdbcDateUtils.asDateTimeField(v, JdbcDateUtils::asTimestamp, Timestamp::new); case INTERVAL_YEAR: case INTERVAL_MONTH: case INTERVAL_YEAR_TO_MONTH: @@ -470,21 +468,21 @@ private static Date asDate(Object val, EsType columnType, String typeString) throws SQLException { if (columnType == EsType.DATE) { - return new Date(utcMillisRemoveTime(((Number) val).longValue())); + return JdbcDateUtils.asDateTimeField(val, JdbcDateUtils::asDate, Date::new); } return failConversion(val, columnType, typeString, Date.class); } private static Time asTime(Object val, EsType columnType, String typeString) throws SQLException { if (columnType == EsType.DATE) { - return new Time(utcMillisRemoveDate(((Number) val).longValue())); + return JdbcDateUtils.asDateTimeField(val, JdbcDateUtils::asTime, Time::new); } return failConversion(val, columnType, typeString, Time.class); } private static Timestamp asTimestamp(Object val, EsType columnType, String typeString) throws SQLException { if (columnType == EsType.DATE) { - return new Timestamp(((Number) val).longValue()); + return JdbcDateUtils.asDateTimeField(val, JdbcDateUtils::asTimestamp, Timestamp::new); } return failConversion(val, columnType, typeString, Timestamp.class); } @@ -513,14 +511,6 @@ final class TypeConverter { throw new SQLFeatureNotSupportedException(); } - private static long utcMillisRemoveTime(long l) { - return l - (l % DAY_IN_MILLIS); - } - - private static long utcMillisRemoveDate(long l) { - return l % DAY_IN_MILLIS; - } - private static byte safeToByte(long x) throws SQLException { if (x > Byte.MAX_VALUE || x < Byte.MIN_VALUE) { throw new SQLException(format(Locale.ROOT, "Numeric %s out of range", Long.toString(x))); } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java index 02d5f7c06ca..039c15359eb 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java @@ -31,6 +31,7 @@ import java.util.Set; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.assertNoSearchContexts; public abstract class JdbcIntegrationTestCase extends ESRestTestCase { + @After public void checkSearchContent() throws Exception { // Some context might
linger due to fire and forget nature of scroll cleanup diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java index 08eac9b8a2c..becb76f4e0e 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java @@ -12,6 +12,9 @@ import org.elasticsearch.xpack.sql.proto.ColumnInfo; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.List; @@ -20,6 +23,8 @@ public abstract class JdbcTestUtils { public static final String SQL_TRACE = "org.elasticsearch.xpack.sql:TRACE"; public static final String JDBC_TIMEZONE = "timezone"; + + public static ZoneId UTC = ZoneId.of("Z"); public static void logResultSetMetadata(ResultSet rs, Logger logger) throws SQLException { ResultSetMetaData metaData = rs.getMetaData(); @@ -128,4 +133,8 @@ public abstract class JdbcTestUtils { CliFormatter formatter = new CliFormatter(cols, data); logger.info("\n" + formatter.formatWithHeader(cols, data)); } + + public static ZonedDateTime of(long millis) { + return ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), UTC); + } } \ No newline at end of file diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java index 1db64874a7c..3cb87085258 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java @@ -58,6 +58,7 @@ import static java.util.Calendar.MONTH; import static java.util.Calendar.SECOND; import static java.util.Calendar.YEAR; import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.JDBC_TIMEZONE; +import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.of; public class ResultSetTestCase extends JdbcIntegrationTestCase { @@ -200,10 +201,10 @@ public class ResultSetTestCase extends JdbcIntegrationTestCase { sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getByte("test_date")); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Byte]", randomDate), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Byte]", of(randomDate)), sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Byte.class)); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Byte]", randomDate), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Byte]", of(randomDate)), sqle.getMessage()); }); } @@ -323,10 +324,10 @@ public class ResultSetTestCase extends JdbcIntegrationTestCase { sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getShort("test_date")); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Short]", randomDate), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Short]", of(randomDate)), sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", 
Short.class)); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Short]", randomDate), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Short]", of(randomDate)), sqle.getMessage()); }); } @@ -438,10 +439,10 @@ public class ResultSetTestCase extends JdbcIntegrationTestCase { sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getInt("test_date")); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Integer]", randomDate), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Integer]", of(randomDate)), sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Integer.class)); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Integer]", randomDate), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Integer]", of(randomDate)), sqle.getMessage()); }); } @@ -540,10 +541,10 @@ public class ResultSetTestCase extends JdbcIntegrationTestCase { sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getLong("test_date")); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Long]", randomDate), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Long]", of(randomDate)), sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Long.class)); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Long]", randomDate), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Long]", of(randomDate)), sqle.getMessage()); }); } @@ -623,10 +624,10 @@ public class ResultSetTestCase extends JdbcIntegrationTestCase { sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getDouble("test_date")); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Double]", randomDate), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Double]", of(randomDate)), sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Double.class)); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Double]", randomDate), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Double]", of(randomDate)), sqle.getMessage()); }); } @@ -706,10 +707,10 @@ public class ResultSetTestCase extends JdbcIntegrationTestCase { sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getFloat("test_date")); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Float]", randomDate), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Float]", of(randomDate)), sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Float.class)); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Float]", randomDate), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Float]", of(randomDate)), sqle.getMessage()); }); } @@ -767,7 +768,7 @@ public class ResultSetTestCase extends JdbcIntegrationTestCase { assertEquals("Expected: but was: for field " + fld, true, results.getObject(fld, Boolean.class)); } SQLException 
sqle = expectThrows(SQLException.class, () -> results.getBoolean("test_date")); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Boolean]", randomDate1), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Boolean]", of(randomDate1)), sqle.getMessage()); results.next(); @@ -777,11 +778,11 @@ public class ResultSetTestCase extends JdbcIntegrationTestCase { assertEquals("Expected: but was: for field " + fld, false, results.getObject(fld, Boolean.class)); } sqle = expectThrows(SQLException.class, () -> results.getBoolean("test_date")); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Boolean]", randomDate2), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Boolean]", of(randomDate2)), sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Boolean.class)); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Boolean]", randomDate2), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Boolean]", of(randomDate2)), sqle.getMessage()); results.next(); diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java index 6206879487d..49c0758adbe 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java @@ -171,15 +171,8 @@ public class SqlQueryResponse extends ActionResponse implements ToXContentObject public static XContentBuilder value(XContentBuilder builder, Mode mode, Object value) throws IOException { if (value instanceof ZonedDateTime) { ZonedDateTime zdt = (ZonedDateTime) value; - if (Mode.isDriver(mode)) { - // JDBC cannot parse dates in string format and ODBC can have issues with it - // so instead, use the millis since epoch (in UTC) - builder.value(zdt.toInstant().toEpochMilli()); - } - // otherwise use the ISO format - else { - builder.value(StringUtils.toString(zdt)); - } + // use the ISO format + builder.value(StringUtils.toString(zdt)); } else { builder.value(value); } From 9584adf9d9579f59c0e98ee4d5f2cf54d55be2e7 Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Wed, 19 Dec 2018 17:14:01 +0200 Subject: [PATCH 20/20] SQL: Enhance Verifier to prevent aggregate or grouping functions from (#36799) Improve Verifier to prevent aggregate or grouping functions from being used in a WHERE clause. 
Fix #36798 --- .../xpack/sql/analysis/analyzer/Verifier.java | 22 +++++++++++++++++-- .../sql/expression/function/Functions.java | 5 +++++ .../analyzer/VerifierErrorMessagesTests.java | 19 ++++++++++++---- 3 files changed, 40 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java index 386be1eadcc..47f68a640c7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java @@ -223,12 +223,13 @@ public final class Verifier { validateInExpression(p, localFailures); validateConditional(p, localFailures); + checkFilterOnAggs(p, localFailures); + if (!groupingFailures.contains(p)) { checkGroupBy(p, localFailures, resolvedFunctions, groupingFailures); } checkForScoreInsideFunctions(p, localFailures); - checkNestedUsedInGroupByOrHaving(p, localFailures); // everything checks out @@ -370,7 +371,7 @@ public final class Verifier { if (!missing.isEmpty()) { String plural = missing.size() > 1 ? "s" : StringUtils.EMPTY; localFailures.add( - fail(condition, "Cannot filter HAVING on non-aggregate" + plural + " %s; consider using WHERE instead", + fail(condition, "Cannot use HAVING filter on non-aggregate" + plural + " %s; use WHERE instead", Expressions.names(missing.keySet()))); groupingFailures.add(a); return false; @@ -542,6 +543,23 @@ public final class Verifier { return false; } + private static void checkFilterOnAggs(LogicalPlan p, Set localFailures) { + if (p instanceof Filter) { + Filter filter = (Filter) p; + if ((filter.child() instanceof Aggregate) == false) { + filter.condition().forEachDown(f -> { + if (Functions.isAggregate(f) || Functions.isGrouping(f)) { + String type = Functions.isAggregate(f) ? 
"aggregate" : "grouping"; + localFailures.add(fail(f, + "Cannot use WHERE filtering on %s function [%s], use HAVING instead", type, Expressions.name(f))); + } + + }, Function.class); + } + } + } + + private static void checkForScoreInsideFunctions(LogicalPlan p, Set localFailures) { // Make sure that SCORE is only used in "top level" functions p.forEachExpressions(e -> diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/Functions.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/Functions.java index 7f5465b7413..46ca0ea91b4 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/Functions.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/Functions.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.sql.expression.function; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.sql.expression.function.grouping.GroupingFunction; import org.elasticsearch.xpack.sql.plan.QueryPlan; import java.util.LinkedHashMap; @@ -18,6 +19,10 @@ public abstract class Functions { return e instanceof AggregateFunction; } + public static boolean isGrouping(Expression e) { + return e instanceof GroupingFunction; + } + public static Map collectFunctions(QueryPlan plan) { Map resolvedFunctions = new LinkedHashMap<>(); plan.forEachExpressionsDown(e -> { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java index fcb46d7f8d4..a3fd459bf3c 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -227,7 +227,7 @@ public class VerifierErrorMessagesTests extends ESTestCase { } public void testGroupByHavingNonGrouped() { - assertEquals("1:48: Cannot filter HAVING on non-aggregate [int]; consider using WHERE instead", + assertEquals("1:48: Cannot use HAVING filter on non-aggregate [int]; use WHERE instead", error("SELECT AVG(int) FROM test GROUP BY text HAVING int > 10")); } @@ -296,12 +296,12 @@ public class VerifierErrorMessagesTests extends ESTestCase { } public void testHavingOnColumn() { - assertEquals("1:42: Cannot filter HAVING on non-aggregate [int]; consider using WHERE instead", + assertEquals("1:42: Cannot use HAVING filter on non-aggregate [int]; use WHERE instead", error("SELECT int FROM test GROUP BY int HAVING int > 2")); } public void testHavingOnScalar() { - assertEquals("1:42: Cannot filter HAVING on non-aggregate [int]; consider using WHERE instead", + assertEquals("1:42: Cannot use HAVING filter on non-aggregate [int]; use WHERE instead", error("SELECT int FROM test GROUP BY int HAVING 2 < ABS(int)")); } @@ -474,4 +474,15 @@ public class VerifierErrorMessagesTests extends ESTestCase { ": expected data type [KEYWORD], value provided is of type [INTEGER]", error("SELECT * FROM test WHERE " + arbirtraryArgsfunction + "(null, null, 'foo', 4) > 1")); } -} + + public void testAggsInWhere() { + assertEquals("1:33: Cannot use WHERE filtering on aggregate function [MAX(int)], use HAVING instead", + error("SELECT MAX(int) FROM test WHERE MAX(int) > 10 GROUP BY bool")); + } + + public 
void testHistogramInFilter() { + assertEquals("1:63: Cannot use WHERE filtering on grouping function [HISTOGRAM(date)], use HAVING instead", + error("SELECT HISTOGRAM(date, INTERVAL 1 MONTH) AS h FROM test WHERE " + + "HISTOGRAM(date, INTERVAL 1 MONTH) > CAST('2000-01-01' AS DATE) GROUP BY h")); + } +} \ No newline at end of file
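To close out the Verifier change, here is the rule from the client's point of view. The rejected query and the error text come from VerifierErrorMessagesTests above; the accepted rewrite follows the advice in the error message and is an assumption, not something the patch contains:

    // Rejected: aggregates are computed per group, after WHERE has already filtered rows,
    // so an aggregate (or grouping function) cannot appear in a WHERE predicate.
    String rejected = "SELECT MAX(int) FROM test WHERE MAX(int) > 10 GROUP BY bool";
    // -> "1:33: Cannot use WHERE filtering on aggregate function [MAX(int)], use HAVING instead"

    // Accepted (assumed rewrite): the same predicate expressed as a post-aggregation filter.
    String accepted = "SELECT MAX(int) FROM test GROUP BY bool HAVING MAX(int) > 10";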